Move `Channel::get_*_pending_htlc_stats` to `ChannelContext` impl
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a41fb676e935746b25dd9903405d323f5fe5e754..af51595650ab9a1b586331ab18719e6a5d3023c4 100644
@@ -19,7 +19,7 @@
 
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::Hash;
@@ -56,7 +56,7 @@ use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
 use crate::ln::wire::Encode;
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
-use crate::util::config::{UserConfig, ChannelConfig};
+use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::string::UntrustedString;
@@ -112,6 +112,8 @@ pub(super) enum PendingHTLCRouting {
                phantom_shared_secret: Option<[u8; 32]>,
        },
        ReceiveKeysend {
+               /// This was added in 0.0.116 and will break deserialization on downgrades.
+               payment_data: Option<msgs::FinalOnionHopData>,
                payment_preimage: PaymentPreimage,
                payment_metadata: Option<Vec<u8>>,
                incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
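
Side note on the new `payment_data` field: the downgrade warning follows from the BOLT 1 TLV rule (a reader must reject a record containing an unknown even type, but may skip an unknown odd type), combined with the assumption, not visible in this hunk, that the field is serialized under an even TLV type. A self-contained sketch of the rule itself:

    // BOLT 1 "it's ok to be odd": unknown odd TLV types are skippable,
    // unknown even types force the reader to reject the whole record.
    fn handle_unknown_tlv(type_num: u64) -> Result<(), &'static str> {
        if type_num % 2 == 0 {
            Err("unknown even TLV type: refuse to deserialize")
        } else {
            Ok(()) // odd types are optional; safe to ignore
        }
    }
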
@@ -1373,8 +1375,14 @@ pub struct ChannelDetails {
        /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
        /// to use a limit as close as possible to the HTLC limit we can currently send.
        ///
-       /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+       /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+       /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
        pub next_outbound_htlc_limit_msat: u64,
+       /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+       /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+       /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+       /// route which is valid.
+       pub next_outbound_htlc_minimum_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
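
With a lower bound now exposed alongside the existing limit, callers doing their own route selection can pre-filter first hops. A minimal sketch using only the two fields documented above (the helper name is hypothetical):

    use lightning::ln::channelmanager::ChannelDetails;

    /// True if this channel can currently carry a single HTLC of `amount_msat`.
    fn first_hop_can_carry(details: &ChannelDetails, amount_msat: u64) -> bool {
        amount_msat >= details.next_outbound_htlc_minimum_msat
            && amount_msat <= details.next_outbound_htlc_limit_msat
    }
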
@@ -1465,46 +1473,47 @@ impl ChannelDetails {
                let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
                        channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                ChannelDetails {
-                       channel_id: channel.channel_id(),
+                       channel_id: channel.context.channel_id(),
                        counterparty: ChannelCounterparty {
-                               node_id: channel.get_counterparty_node_id(),
+                               node_id: channel.context.get_counterparty_node_id(),
                                features: latest_features,
                                unspendable_punishment_reserve: to_remote_reserve_satoshis,
-                               forwarding_info: channel.counterparty_forwarding_info(),
+                               forwarding_info: channel.context.counterparty_forwarding_info(),
                                // Ensures that we have actually received the `htlc_minimum_msat` value
                                // from the counterparty through the `OpenChannel` or `AcceptChannel`
                                // message (as they are always the first message from the counterparty).
                                // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
                                // default `0` value set by `Channel::new_outbound`.
-                               outbound_htlc_minimum_msat: if channel.have_received_message() {
-                                       Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
-                               outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+                               outbound_htlc_minimum_msat: if channel.context.have_received_message() {
+                                       Some(channel.context.get_counterparty_htlc_minimum_msat()) } else { None },
+                               outbound_htlc_maximum_msat: channel.context.get_counterparty_htlc_maximum_msat(),
                        },
-                       funding_txo: channel.get_funding_txo(),
+                       funding_txo: channel.context.get_funding_txo(),
                        // Note that accept_channel (or open_channel) is always the first message, so
                        // `have_received_message` indicates that type negotiation has completed.
-                       channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
-                       short_channel_id: channel.get_short_channel_id(),
-                       outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
-                       inbound_scid_alias: channel.latest_inbound_scid_alias(),
-                       channel_value_satoshis: channel.get_value_satoshis(),
-                       feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
+                       channel_type: if channel.context.have_received_message() { Some(channel.context.get_channel_type().clone()) } else { None },
+                       short_channel_id: channel.context.get_short_channel_id(),
+                       outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None },
+                       inbound_scid_alias: channel.context.latest_inbound_scid_alias(),
+                       channel_value_satoshis: channel.context.get_value_satoshis(),
+                       feerate_sat_per_1000_weight: Some(channel.context.get_feerate_sat_per_1000_weight()),
                        unspendable_punishment_reserve: to_self_reserve_satoshis,
                        balance_msat: balance.balance_msat,
                        inbound_capacity_msat: balance.inbound_capacity_msat,
                        outbound_capacity_msat: balance.outbound_capacity_msat,
                        next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
-                       user_channel_id: channel.get_user_id(),
-                       confirmations_required: channel.minimum_depth(),
-                       confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
-                       force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
-                       is_outbound: channel.is_outbound(),
-                       is_channel_ready: channel.is_usable(),
-                       is_usable: channel.is_live(),
-                       is_public: channel.should_announce(),
-                       inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
-                       inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
-                       config: Some(channel.config()),
+                       next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
+                       user_channel_id: channel.context.get_user_id(),
+                       confirmations_required: channel.context.minimum_depth(),
+                       confirmations: Some(channel.context.get_funding_tx_confirmations(best_block_height)),
+                       force_close_spend_delay: channel.context.get_counterparty_selected_contest_delay(),
+                       is_outbound: channel.context.is_outbound(),
+                       is_channel_ready: channel.context.is_usable(),
+                       is_usable: channel.context.is_live(),
+                       is_public: channel.context.should_announce(),
+                       inbound_htlc_minimum_msat: Some(channel.context.get_holder_htlc_minimum_msat()),
+                       inbound_htlc_maximum_msat: channel.context.get_holder_htlc_maximum_msat(),
+                       config: Some(channel.context.config()),
                }
        }
 }
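
The mechanical `channel.foo()` to `channel.context.foo()` rewrite running through this hunk (and the rest of the file) reflects the ongoing split of `Channel` into a shared `ChannelContext`. Roughly, as a sketch of the shape rather than the exact upstream definitions:

    // Plain channel state and accessors move onto a context that funded and
    // unfunded channel types can share; `Channel` keeps the commitment and
    // HTLC state machine.
    pub struct ChannelContext {
        // channel_id, counterparty info, config, balances, feerate, ...
    }

    pub struct Channel {
        pub context: ChannelContext,
        // commitment/HTLC state machine fields ...
    }
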
@@ -1606,9 +1615,9 @@ macro_rules! handle_error {
 
 macro_rules! update_maps_on_chan_removal {
        ($self: expr, $channel: expr) => {{
-               $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+               $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id());
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
-               if let Some(short_id) = $channel.get_short_channel_id() {
+               if let Some(short_id) = $channel.context.get_short_channel_id() {
                        short_to_chan_info.remove(&short_id);
                } else {
                        // If the channel was never confirmed on-chain prior to its closure, remove the
@@ -1617,10 +1626,10 @@ macro_rules! update_maps_on_chan_removal {
                        // also don't want a counterparty to be able to trivially cause a memory leak by simply
                        // opening a million channels with us which are closed before we ever reach the funding
                        // stage.
-                       let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
+                       let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias());
                        debug_assert!(alias_removed);
                }
-               short_to_chan_info.remove(&$channel.outbound_scid_alias());
+               short_to_chan_info.remove(&$channel.context.outbound_scid_alias());
        }}
 }
 
@@ -1638,7 +1647,7 @@ macro_rules! convert_chan_err {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
                                update_maps_on_chan_removal!($self, $channel);
                                let shutdown_res = $channel.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                }
@@ -1688,18 +1697,18 @@ macro_rules! remove_channel {
 macro_rules! send_channel_ready {
        ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
                $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
-                       node_id: $channel.get_counterparty_node_id(),
+                       node_id: $channel.context.get_counterparty_node_id(),
                        msg: $channel_ready_msg,
                });
                // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
-               let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
-               assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+               let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+               assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
                        "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
-               if let Some(real_scid) = $channel.get_short_channel_id() {
-                       let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
-                       assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+               if let Some(real_scid) = $channel.context.get_short_channel_id() {
+                       let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+                       assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
                                "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                }
        }}
@@ -1707,30 +1716,30 @@ macro_rules! send_channel_ready {
 
 macro_rules! emit_channel_pending_event {
        ($locked_events: expr, $channel: expr) => {
-               if $channel.should_emit_channel_pending_event() {
+               if $channel.context.should_emit_channel_pending_event() {
                        $locked_events.push_back((events::Event::ChannelPending {
-                               channel_id: $channel.channel_id(),
-                               former_temporary_channel_id: $channel.temporary_channel_id(),
-                               counterparty_node_id: $channel.get_counterparty_node_id(),
-                               user_channel_id: $channel.get_user_id(),
-                               funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               channel_id: $channel.context.channel_id(),
+                               former_temporary_channel_id: $channel.context.temporary_channel_id(),
+                               counterparty_node_id: $channel.context.get_counterparty_node_id(),
+                               user_channel_id: $channel.context.get_user_id(),
+                               funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
                        }, None));
-                       $channel.set_channel_pending_event_emitted();
+                       $channel.context.set_channel_pending_event_emitted();
                }
        }
 }
 
 macro_rules! emit_channel_ready_event {
        ($locked_events: expr, $channel: expr) => {
-               if $channel.should_emit_channel_ready_event() {
-                       debug_assert!($channel.channel_pending_event_emitted());
+               if $channel.context.should_emit_channel_ready_event() {
+                       debug_assert!($channel.context.channel_pending_event_emitted());
                        $locked_events.push_back((events::Event::ChannelReady {
-                               channel_id: $channel.channel_id(),
-                               user_channel_id: $channel.get_user_id(),
-                               counterparty_node_id: $channel.get_counterparty_node_id(),
-                               channel_type: $channel.get_channel_type().clone(),
+                               channel_id: $channel.context.channel_id(),
+                               user_channel_id: $channel.context.get_user_id(),
+                               counterparty_node_id: $channel.context.get_counterparty_node_id(),
+                               channel_type: $channel.context.get_channel_type().clone(),
                        }, None));
-                       $channel.set_channel_ready_event_emitted();
+                       $channel.context.set_channel_ready_event_emitted();
                }
        }
 }
@@ -1740,8 +1749,8 @@ macro_rules! handle_monitor_update_completion {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
                        &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
-               let counterparty_node_id = $chan.get_counterparty_node_id();
-               let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+               let counterparty_node_id = $chan.context.get_counterparty_node_id();
+               let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
                        // We only send a channel_update in the case where we are just now sending a
                        // channel_ready and the channel is in a usable state. We may re-send a
                        // channel_update later through the announcement_signatures process for public
@@ -1756,7 +1765,7 @@ macro_rules! handle_monitor_update_completion {
                } else { None };
 
                let update_actions = $peer_state.monitor_update_blocked_actions
-                       .remove(&$chan.channel_id()).unwrap_or(Vec::new());
+                       .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
 
                let htlc_forwards = $self.handle_channel_resumption(
                        &mut $peer_state.pending_msg_events, $chan, updates.raa,
@@ -1767,7 +1776,7 @@ macro_rules! handle_monitor_update_completion {
                        $peer_state.pending_msg_events.push(upd);
                }
 
-               let channel_id = $chan.channel_id();
+               let channel_id = $chan.context.channel_id();
                core::mem::drop($peer_state_lock);
                core::mem::drop($per_peer_state_lock);
 
@@ -1795,16 +1804,16 @@ macro_rules! handle_new_monitor_update {
                match $update_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
-                                       log_bytes!($chan.channel_id()[..]));
+                                       log_bytes!($chan.context.channel_id()[..]));
                                Ok(())
                        },
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
-                                       log_bytes!($chan.channel_id()[..]));
+                                       log_bytes!($chan.context.channel_id()[..]));
                                update_maps_on_chan_removal!($self, $chan);
                                let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
-                                       "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
-                                       $chan.get_user_id(), $chan.force_shutdown(false),
+                                       "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
+                                       $chan.context.get_user_id(), $chan.force_shutdown(false),
                                        $self.get_channel_update_for_broadcast(&$chan).ok()));
                                $remove;
                                res
@@ -2048,7 +2057,7 @@ where
                };
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
-               let temporary_channel_id = channel.channel_id();
+               let temporary_channel_id = channel.context.channel_id();
                match peer_state.channel_by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(fuzzing) {
@@ -2107,7 +2116,7 @@ where
                // Note we use is_live here instead of usable which leads to somewhat confused
                // internal/external nomenclature, but that's ok cause that's probably what the user
                // really wanted anyway.
-               self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
+               self.list_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
        }
 
        /// Gets the list of channels we have with a given counterparty, in random order.
@@ -2162,14 +2171,14 @@ where
                match channel.unbroadcasted_funding() {
                        Some(transaction) => {
                                pending_events_lock.push_back((events::Event::DiscardFunding {
-                                       channel_id: channel.channel_id(), transaction
+                                       channel_id: channel.context.channel_id(), transaction
                                }, None));
                        },
                        None => {},
                }
                pending_events_lock.push_back((events::Event::ChannelClosed {
-                       channel_id: channel.channel_id(),
-                       user_channel_id: channel.get_user_id(),
+                       channel_id: channel.context.channel_id(),
+                       user_channel_id: channel.context.get_user_id(),
                        reason: closure_reason
                }, None));
        }
@@ -2188,7 +2197,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
                                        let their_features = &peer_state.latest_features;
                                        let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
                                                .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
@@ -2344,7 +2353,7 @@ where
                        });
                }
 
-               Ok(chan.get_counterparty_node_id())
+               Ok(chan.context.get_counterparty_node_id())
        }
 
        fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
@@ -2450,20 +2459,7 @@ where
                                });
                        },
                        msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => {
-                               if payment_data.is_some() && keysend_preimage.is_some() {
-                                       return Err(ReceiveError {
-                                               err_code: 0x4000|22,
-                                               err_data: Vec::new(),
-                                               msg: "We don't support MPP keysend payments",
-                                       });
-                               } else if let Some(data) = payment_data {
-                                       PendingHTLCRouting::Receive {
-                                               payment_data: data,
-                                               payment_metadata,
-                                               incoming_cltv_expiry: hop_data.outgoing_cltv_value,
-                                               phantom_shared_secret,
-                                       }
-                               } else if let Some(payment_preimage) = keysend_preimage {
+                               if let Some(payment_preimage) = keysend_preimage {
                                        // We need to check that the sender knows the keysend preimage before processing this
                                        // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
                                        // could discover the final destination of X, by probing the adjacent nodes on the route
@@ -2477,12 +2473,26 @@ where
                                                        msg: "Payment preimage didn't match payment hash",
                                                });
                                        }
-
+                                       if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() {
+                                               return Err(ReceiveError {
+                                                       err_code: 0x4000|22,
+                                                       err_data: Vec::new(),
+                                                       msg: "We don't support MPP keysend payments",
+                                               });
+                                       }
                                        PendingHTLCRouting::ReceiveKeysend {
+                                               payment_data,
                                                payment_preimage,
                                                payment_metadata,
                                                incoming_cltv_expiry: hop_data.outgoing_cltv_value,
                                        }
+                               } else if let Some(data) = payment_data {
+                                       PendingHTLCRouting::Receive {
+                                               payment_data: data,
+                                               payment_metadata,
+                                               incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+                                               phantom_shared_secret,
+                                       }
                                } else {
                                        return Err(ReceiveError {
                                                err_code: 0x4000|0x2000|3,
@@ -2640,13 +2650,13 @@ where
                                                        },
                                                        Some(chan) => chan
                                                };
-                                               if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+                                               if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
                                                        // we don't allow forwards outbound over them.
                                                        break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
                                                }
-                                               if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
+                                               if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
                                                        // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
                                                        // "refuse to forward unless the SCID alias was used", so we pretend
                                                        // we don't have the channel here.
@@ -2659,7 +2669,7 @@ where
                                                // around to doing the actual forward, but better to fail early if we can and
                                                // hopefully an attacker trying to path-trace payments cannot make this occur
                                                // on a small/per-node/per-channel scale.
-                                               if !chan.is_live() { // channel_disabled
+                                               if !chan.context.is_live() { // channel_disabled
                                                        // If the channel_update we're going to return is disabled (i.e. the
                                                        // peer has been disabled for some time), return `channel_disabled`,
                                                        // otherwise return `temporary_channel_failure`.
@@ -2669,7 +2679,7 @@ where
                                                                break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
                                                        }
                                                }
-                                               if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+                                               if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
                                                        break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
                                                }
                                                if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
@@ -2755,16 +2765,16 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-               if !chan.should_announce() {
+               if !chan.context.should_announce() {
                        return Err(LightningError {
                                err: "Cannot broadcast a channel_update for a private channel".to_owned(),
                                action: msgs::ErrorAction::IgnoreError
                        });
                }
-               if chan.get_short_channel_id().is_none() {
+               if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
                self.get_channel_update_for_unicast(chan)
        }
 
@@ -2780,8 +2790,8 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
-               let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                };
@@ -2789,10 +2799,10 @@ where
                self.get_channel_update_for_onion(short_channel_id, chan)
        }
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
-               let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
-               let enabled = chan.is_usable() && match chan.channel_update_status() {
+               let enabled = chan.context.is_usable() && match chan.channel_update_status() {
                        ChannelUpdateStatus::Enabled => true,
                        ChannelUpdateStatus::DisabledStaged(_) => true,
                        ChannelUpdateStatus::Disabled => false,
@@ -2802,13 +2812,13 @@ where
                let unsigned = msgs::UnsignedChannelUpdate {
                        chain_hash: self.genesis_hash,
                        short_channel_id,
-                       timestamp: chan.get_update_time_counter(),
+                       timestamp: chan.context.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
-                       cltv_expiry_delta: chan.get_cltv_expiry_delta(),
-                       htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
-                       htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
-                       fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
-                       fee_proportional_millionths: chan.get_fee_proportional_millionths(),
+                       cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
+                       htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
+                       htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
+                       fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
+                       fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                };
                // Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
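
The `flags` expression above packs the BOLT 7 `channel_update` bits: bit 0 is the direction (0 if we are `node_id_1`), bit 1 is "disabled". A worked example:

    let were_node_one = false;
    let enabled = true;
    let flags: u8 = (!were_node_one) as u8 | ((!enabled as u8) << 1);
    assert_eq!(flags, 0b01); // node_2's side of the channel, not disabled
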
@@ -2856,10 +2866,10 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
-                               if !chan.get().is_live() {
+                               if !chan.get().context.is_live() {
                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
                                }
-                               let funding_txo = chan.get().get_funding_txo().unwrap();
+                               let funding_txo = chan.get().context.get_funding_txo().unwrap();
                                let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
                                        htlc_cltv, HTLCSource::OutboundRoute {
                                                path: path.clone(),
@@ -3032,8 +3042,6 @@ where
        /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
        /// [`send_payment`] for more information about the risks of duplicate preimage usage.
        ///
-       /// Note that `route` must have exactly one path.
-       ///
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
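
With the one-path restriction dropped, `route` may now carry multiple paths (MPP keysend). A hedged usage sketch: `channel_manager`, `route`, and the caller-supplied `[u8; 32]` `entropy_bytes` are assumed to be in scope, and passing `None` asks LDK to generate the preimage:

    let payment_hash = channel_manager.send_spontaneous_payment(
        &route, None, RecipientOnionFields::spontaneous_empty(),
        PaymentId(entropy_bytes),
    )?;
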
@@ -3098,7 +3106,7 @@ where
 
                                let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
                                        .map_err(|e| if let ChannelError::Close(msg) = e {
-                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None)
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (funding_msg, chan),
@@ -3106,7 +3114,7 @@ where
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
 
-                                               let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+                                               let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id());
                                                return Err(APIError::ChannelUnavailable {
                                                        err: "Signer refused to sign the initial commitment transaction".to_owned()
                                                });
@@ -3123,16 +3131,16 @@ where
                };
 
                peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-                       node_id: chan.get_counterparty_node_id(),
+                       node_id: chan.context.get_counterparty_node_id(),
                        msg,
                });
-               match peer_state.channel_by_id.entry(chan.channel_id()) {
+               match peer_state.channel_by_id.entry(chan.context.channel_id()) {
                        hash_map::Entry::Occupied(_) => {
                                panic!("Generated duplicate funding txid?");
                        },
                        hash_map::Entry::Vacant(e) => {
                                let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+                               if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
                                e.insert(chan);
@@ -3208,9 +3216,9 @@ where
                        }
 
                        let mut output_index = None;
-                       let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+                       let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
                        for (idx, outp) in tx.output.iter().enumerate() {
-                               if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+                               if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
                                        if output_index.is_some() {
                                                return Err(APIError::APIMisuseError {
                                                        err: "Multiple outputs matched the expected script and value".to_owned()
@@ -3228,7 +3236,7 @@ where
                })
        }
 
-       /// Atomically updates the [`ChannelConfig`] for the given channels.
+       /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
        ///
        /// Once the updates are applied, each eligible channel (advertised with a known short channel
        /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
@@ -3250,10 +3258,10 @@ where
        /// [`ChannelUpdate`]: msgs::ChannelUpdate
        /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
        /// [`APIMisuseError`]: APIError::APIMisuseError
-       pub fn update_channel_config(
-               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+       pub fn update_partial_channel_config(
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate,
        ) -> Result<(), APIError> {
-               if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA {
+               if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
                        return Err(APIError::APIMisuseError {
                                err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
                        });
@@ -3274,14 +3282,16 @@ where
                }
                for channel_id in channel_ids {
                        let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
-                       if !channel.update_config(config) {
+                       let mut config = channel.context.config();
+                       config.apply(config_update);
+                       if !channel.context.update_config(&config) {
                                continue;
                        }
                        if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
                                peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
                        } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                       node_id: channel.get_counterparty_node_id(),
+                                       node_id: channel.context.get_counterparty_node_id(),
                                        msg,
                                });
                        }
@@ -3289,6 +3299,34 @@ where
                Ok(())
        }
 
+       /// Atomically updates the [`ChannelConfig`] for the given channels.
+       ///
+       /// Once the updates are applied, each eligible channel (advertised with a known short channel
+       /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
+       /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
+       /// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
+       ///
+       /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
+       /// `counterparty_node_id` is provided.
+       ///
+       /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
+       /// below [`MIN_CLTV_EXPIRY_DELTA`].
+       ///
+       /// If an error is returned, none of the updates should be considered applied.
+       ///
+       /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
+       /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
+       /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
+       /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
+       /// [`ChannelUpdate`]: msgs::ChannelUpdate
+       /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
+       /// [`APIMisuseError`]: APIError::APIMisuseError
+       pub fn update_channel_config(
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+       ) -> Result<(), APIError> {
+               return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
+       }
+
        /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
        /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
        ///
@@ -3322,12 +3360,12 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.get(next_hop_channel_id) {
                                Some(chan) => {
-                                       if !chan.is_usable() {
+                                       if !chan.context.is_usable() {
                                                return Err(APIError::ChannelUnavailable {
                                                        err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
                                                })
                                        }
-                                       chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+                                       chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
                                },
                                None => return Err(APIError::ChannelUnavailable {
                                        err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
@@ -3553,7 +3591,7 @@ where
                                                                                        let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::reason(failure_code, data),
-                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                        ));
                                                                                        continue;
                                                                                }
@@ -3598,16 +3636,19 @@ where
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
                                                                                        Some(payment_data), phantom_shared_secret, onion_fields)
                                                                        },
-                                                                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_metadata, incoming_cltv_expiry } => {
-                                                                               let onion_fields = RecipientOnionFields { payment_secret: None, payment_metadata };
+                                                                       PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => {
+                                                                               let onion_fields = RecipientOnionFields {
+                                                                                       payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
+                                                                                       payment_metadata
+                                                                               };
                                                                                (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
-                                                                                       None, None, onion_fields)
+                                                                                       payment_data, None, onion_fields)
                                                                        },
                                                                        _ => {
                                                                                panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
                                                                        }
                                                                };
-                                                               let mut claimable_htlc = ClaimableHTLC {
+                                                               let claimable_htlc = ClaimableHTLC {
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                outpoint: prev_funding_outpoint,
@@ -3657,13 +3698,11 @@ where
                                                                }
 
                                                                macro_rules! check_total_value {
-                                                                       ($payment_data: expr, $payment_preimage: expr) => {{
+                                                                       ($purpose: expr) => {{
                                                                                let mut payment_claimable_generated = false;
-                                                                               let purpose = || {
-                                                                                       events::PaymentPurpose::InvoicePayment {
-                                                                                               payment_preimage: $payment_preimage,
-                                                                                               payment_secret: $payment_data.payment_secret,
-                                                                                       }
+                                                                               let is_keysend = match $purpose {
+                                                                                       events::PaymentPurpose::SpontaneousPayment(_) => true,
+                                                                                       events::PaymentPurpose::InvoicePayment { .. } => false,
                                                                                };
                                                                                let mut claimable_payments = self.claimable_payments.lock().unwrap();
                                                                                if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
@@ -3675,9 +3714,18 @@ where
                                                                                        .or_insert_with(|| {
                                                                                                committed_to_claimable = true;
                                                                                                ClaimablePayment {
-                                                                                                       purpose: purpose(), htlcs: Vec::new(), onion_fields: None,
+                                                                                                       purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
                                                                                                }
                                                                                        });
+                                                                               if $purpose != claimable_payment.purpose {
+                                                                                       let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
+                                                                                       log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), log_bytes!(payment_hash.0), log_keysend(!is_keysend));
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
+                                                                               }
+                                                                               if !self.default_configuration.accept_mpp_keysend && is_keysend && !claimable_payment.htlcs.is_empty() {
+                                                                                       log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash and our config states we don't accept MPP keysend", log_bytes!(payment_hash.0));
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
+                                                                               }
                                                                                if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
                                                                                        if earlier_fields.check_merge(&mut onion_fields).is_err() {
                                                                                                fail_htlc!(claimable_htlc, payment_hash);
@@ -3686,38 +3734,27 @@ where
                                                                                        claimable_payment.onion_fields = Some(onion_fields);
                                                                                }
                                                                                let ref mut htlcs = &mut claimable_payment.htlcs;
-                                                                               if htlcs.len() == 1 {
-                                                                                       if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
-                                                                                               log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc, payment_hash);
-                                                                                       }
-                                                                               }
                                                                                let mut total_value = claimable_htlc.sender_intended_value;
                                                                                let mut earliest_expiry = claimable_htlc.cltv_expiry;
                                                                                for htlc in htlcs.iter() {
                                                                                        total_value += htlc.sender_intended_value;
                                                                                        earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
-                                                                                       match &htlc.onion_payload {
-                                                                                               OnionPayload::Invoice { .. } => {
-                                                                                                       if htlc.total_msat != $payment_data.total_msat {
-                                                                                                               log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-                                                                                                                       log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
-                                                                                                               total_value = msgs::MAX_VALUE_MSAT;
-                                                                                                       }
-                                                                                                       if total_value >= msgs::MAX_VALUE_MSAT { break; }
-                                                                                               },
-                                                                                               _ => unreachable!(),
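+                                                                                       // Every HTLC in an MPP set must agree on the sender-intended total;
+                                                                                       // on a mismatch we poison total_value so the whole set fails below.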
+                                                                                       if htlc.total_msat != claimable_htlc.total_msat {
+                                                                                               log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (e.g. {} and {})",
+                                                                                                       log_bytes!(payment_hash.0), claimable_htlc.total_msat, htlc.total_msat);
+                                                                                               total_value = msgs::MAX_VALUE_MSAT;
                                                                                        }
+                                                                                       if total_value >= msgs::MAX_VALUE_MSAT { break; }
                                                                                }
                                                                                // The condition determining whether an MPP is complete must
                                                                                // match exactly the condition used in `timer_tick_occurred`
                                                                                if total_value >= msgs::MAX_VALUE_MSAT {
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
-                                                                               } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
+                                                                               } else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
                                                                                                log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
-                                                                               } else if total_value >= $payment_data.total_msat {
+                                                                               } else if total_value >= claimable_htlc.total_msat {
                                                                                        #[allow(unused_assignments)] {
                                                                                                committed_to_claimable = true;
                                                                                        }
@@ -3728,7 +3765,7 @@ where
                                                                                        new_events.push_back((events::Event::PaymentClaimable {
                                                                                                receiver_node_id: Some(receiver_node_id),
                                                                                                payment_hash,
-                                                                                               purpose: purpose(),
+                                                                                               purpose: $purpose,
                                                                                                amount_msat,
                                                                                                via_channel_id: Some(prev_channel_id),
                                                                                                via_user_channel_id: Some(prev_user_channel_id),
@@ -3776,49 +3813,23 @@ where
                                                                                                                fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
-                                                                                               check_total_value!(payment_data, payment_preimage);
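+                                                                                               // Construct the event purpose up front so the macro can take it
+                                                                                               // directly rather than re-deriving it from payment_data.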
+                                                                                               let purpose = events::PaymentPurpose::InvoicePayment {
+                                                                                                       payment_preimage: payment_preimage.clone(),
+                                                                                                       payment_secret: payment_data.payment_secret,
+                                                                                               };
+                                                                                               check_total_value!(purpose);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
-                                                                                               let mut claimable_payments = self.claimable_payments.lock().unwrap();
-                                                                                               if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
-                                                                                                       fail_htlc!(claimable_htlc, payment_hash);
-                                                                                               }
-                                                                                               match claimable_payments.claimable_payments.entry(payment_hash) {
-                                                                                                       hash_map::Entry::Vacant(e) => {
-                                                                                                               let amount_msat = claimable_htlc.value;
-                                                                                                               claimable_htlc.total_value_received = Some(amount_msat);
-                                                                                                               let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
-                                                                                                               let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
-                                                                                                               e.insert(ClaimablePayment {
-                                                                                                                       purpose: purpose.clone(),
-                                                                                                                       onion_fields: Some(onion_fields.clone()),
-                                                                                                                       htlcs: vec![claimable_htlc],
-                                                                                                               });
-                                                                                                               let prev_channel_id = prev_funding_outpoint.to_channel_id();
-                                                                                                               new_events.push_back((events::Event::PaymentClaimable {
-                                                                                                                       receiver_node_id: Some(receiver_node_id),
-                                                                                                                       payment_hash,
-                                                                                                                       amount_msat,
-                                                                                                                       purpose,
-                                                                                                                       via_channel_id: Some(prev_channel_id),
-                                                                                                                       via_user_channel_id: Some(prev_user_channel_id),
-                                                                                                                       claim_deadline,
-                                                                                                                       onion_fields: Some(onion_fields),
-                                                                                                               }, None));
-                                                                                                       },
-                                                                                                       hash_map::Entry::Occupied(_) => {
-                                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
-                                                                                                       }
-                                                                                               }
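+                                                                                               // Keysend now shares the check_total_value! path with invoice
+                                                                                               // payments, which is what allows multi-part keysend receives.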
+                                                                                               let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+                                                                                               check_total_value!(purpose);
                                                                                        }
                                                                                }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
-                                                                               if payment_data.is_none() {
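+                                                                               // Detect duplicate keysends by onion payload type rather than by the
+                                                                               // absence of payment_data, which keysend HTLCs may now carry.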
+                                                                               if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
                                                                                        log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
-                                                                               };
+                                                                               }
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
@@ -3828,7 +3839,11 @@ where
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
-                                                                                       let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
+                                                                                       let purpose = events::PaymentPurpose::InvoicePayment {
+                                                                                               payment_preimage: inbound_payment.get().payment_preimage,
+                                                                                               payment_secret: payment_data.payment_secret,
+                                                                                       };
+                                                                                       let payment_claimable_generated = check_total_value!(purpose);
                                                                                        if payment_claimable_generated {
                                                                                                inbound_payment.remove_entry();
                                                                                        }
@@ -3928,20 +3943,20 @@ where
        }
 
        fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
-               if !chan.is_outbound() { return NotifyOption::SkipPersist; }
+               if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
                // If the feerate has decreased by less than half, don't bother
-               if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
+               if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-                               log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
+                               log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersist;
                }
-               if !chan.is_live() {
+               if !chan.context.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
-                               log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
+                               log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersist;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
-                       log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
+                       log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
                chan.queue_update_fee(new_feerate, &self.logger);
                NotifyOption::DoPersist
@@ -4015,13 +4030,13 @@ where
                                                }
 
                                                match chan.channel_update_status() {
-                                                       ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
-                                                       ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
-                                                       ChannelUpdateStatus::DisabledStaged(_) if chan.is_live()
+                                                       ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+                                                       ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+                                                       ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
                                                                => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-                                                       ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live()
+                                                       ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
                                                                => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-                                                       ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => {
+                                                       ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
                                                                n += 1;
                                                                if n >= DISABLE_GOSSIP_TICKS {
                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
@@ -4035,7 +4050,7 @@ where
                                                                        chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
                                                                }
                                                        },
-                                                       ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => {
+                                                       ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
                                                                n += 1;
                                                                if n >= ENABLE_GOSSIP_TICKS {
                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
@@ -4052,7 +4067,7 @@ where
                                                        _ => {},
                                                }
 
-                                               chan.maybe_expire_prev_config();
+                                               chan.context.maybe_expire_prev_config();
 
                                                if chan.should_disconnect_peer_awaiting_response() {
                                                        log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
@@ -4212,10 +4227,10 @@ where
                // guess somewhat. If it's a public channel, we figure best to just use the real SCID (as
                // we're not leaking that we have a channel with the counterparty), otherwise we try to use
                // an inbound SCID alias before the real SCID.
-               let scid_pref = if chan.should_announce() {
-                       chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+               let scid_pref = if chan.context.should_announce() {
+                       chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias())
                } else {
-                       chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+                       chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id())
                };
                if let Some(scid) = scid_pref {
                        self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
@@ -4408,18 +4423,6 @@ where
                                break;
                        }
                        expected_amt_msat = htlc.total_value_received;
-
-                       if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
-                               // We don't currently support MPP for spontaneous payments, so just check
-                               // that there's one payment here and move on.
-                               if sources.len() != 1 {
-                                       log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
-                                       debug_assert!(false);
-                                       valid_mpp = false;
-                                       break;
-                               }
-                       }
-
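+                       // Multiple spontaneous-payment HTLCs per payment are now possible (MPP
+                       // keysend), so we no longer assert that only a single source exists.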
                        claimable_amt_msat += htlc.value;
                }
                mem::drop(per_peer_state);
@@ -4489,7 +4492,7 @@ where
                                let mut peer_state_lock = peer_state_opt.unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                       let counterparty_node_id = chan.get().context.get_counterparty_node_id();
                                        let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
 
                                        if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
@@ -4615,7 +4618,7 @@ where
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
                log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
-                       log_bytes!(channel.channel_id()),
+                       log_bytes!(channel.context.channel_id()),
                        if raa.is_some() { "an" } else { "no" },
                        if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
@@ -4624,10 +4627,10 @@ where
 
                let mut htlc_forwards = None;
 
-               let counterparty_node_id = channel.get_counterparty_node_id();
+               let counterparty_node_id = channel.context.get_counterparty_node_id();
                if !pending_forwards.is_empty() {
-                       htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
-                               channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+                       htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
+                               channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
                }
 
                if let Some(msg) = channel_ready {
@@ -4709,8 +4712,8 @@ where
                        }
                };
                log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
-                       highest_applied_update_id, channel.get().get_latest_monitor_update_id());
-               if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+                       highest_applied_update_id, channel.get().context.get_latest_monitor_update_id());
+               if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
                        return;
                }
                handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
@@ -4775,9 +4778,9 @@ where
                                }
                                if accept_0conf {
                                        channel.get_mut().set_0conf();
-                               } else if channel.get().get_channel_type().requires_zero_conf() {
+                               } else if channel.get().context.get_channel_type().requires_zero_conf() {
                                        let send_msg_err_event = events::MessageSendEvent::HandleError {
-                                               node_id: channel.get().get_counterparty_node_id(),
+                                               node_id: channel.get().context.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage{
                                                        msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
                                                }
@@ -4791,7 +4794,7 @@ where
                                        // channels per-peer we can accept channels from a peer with existing ones.
                                        if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
                                                let send_msg_err_event = events::MessageSendEvent::HandleError {
-                                                       node_id: channel.get().get_counterparty_node_id(),
+                                                       node_id: channel.get().context.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage{
                                                                msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
                                                        }
@@ -4803,7 +4806,7 @@ where
                                }
 
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                       node_id: channel.get().get_counterparty_node_id(),
+                                       node_id: channel.get().context.get_counterparty_node_id(),
                                        msg: channel.get_mut().accept_inbound_channel(user_channel_id),
                                });
                        }
@@ -4842,8 +4845,8 @@ where
        ) -> usize {
                let mut num_unfunded_channels = 0;
                for (_, chan) in peer.channel_by_id.iter() {
-                       if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
-                               chan.get_funding_tx_confirmations(best_block_height) == 0
+                       if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+                               chan.context.get_funding_tx_confirmations(best_block_height) == 0
                        {
                                num_unfunded_channels += 1;
                        }
@@ -4908,14 +4911,14 @@ where
                        },
                        Ok(res) => res
                };
-               match peer_state.channel_by_id.entry(channel.channel_id()) {
+               match peer_state.channel_by_id.entry(channel.context.channel_id()) {
                        hash_map::Entry::Occupied(_) => {
                                self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
                                return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
                        },
                        hash_map::Entry::Vacant(entry) => {
                                if !self.default_configuration.manually_accept_inbound_channels {
-                                       if channel.get_channel_type().requires_zero_conf() {
+                                       if channel.context.get_channel_type().requires_zero_conf() {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
                                        }
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
@@ -4929,7 +4932,7 @@ where
                                                counterparty_node_id: counterparty_node_id.clone(),
                                                funding_satoshis: msg.funding_satoshis,
                                                push_msat: msg.push_msat,
-                                               channel_type: channel.get_channel_type().clone(),
+                                               channel_type: channel.context.get_channel_type().clone(),
                                        }, None));
                                }
 
@@ -4952,7 +4955,7 @@ where
                        match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
-                                       (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+                                       (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
@@ -4993,14 +4996,14 @@ where
                                Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
-                               match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
+                               match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
                                                        funding_msg.channel_id))
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
-                                               i_e.insert(chan.get_counterparty_node_id());
+                                               i_e.insert(chan.context.get_counterparty_node_id());
                                        }
                                }
 
@@ -5050,7 +5053,7 @@ where
                        hash_map::Entry::Occupied(mut chan) => {
                                let monitor = try_chan_entry!(self,
                                        chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
-                               let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+                               let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
                                let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                        // We weren't able to watch the channel to begin with, so no updates should be made on
@@ -5080,18 +5083,18 @@ where
                                let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
                                        self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
                                if let Some(announcement_sigs) = announcement_sigs_opt {
-                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
-                               } else if chan.get().is_usable() {
+                               } else if chan.get().context.is_usable() {
                                        // If we're sending an announcement_signatures, we'll send the (public)
                                        // channel_update after sending a channel_announcement when we receive our
                                        // counterparty's announcement_signatures. Thus, we only bother to send a
                                        // channel_update here if the channel is not public, i.e. we're not sending an
                                        // announcement_signatures.
-                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
                                        if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                        node_id: counterparty_node_id.clone(),
@@ -5131,7 +5134,7 @@ where
                                                        if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
                                        }
 
-                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
                                        let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
                                                chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
                                        dropped_htlcs = htlcs;
@@ -5337,7 +5340,7 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               let funding_txo = chan.get().get_funding_txo();
+                               let funding_txo = chan.get().context.get_funding_txo();
                                let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
                                if let Some(monitor_update) = monitor_update_opt {
                                        let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
@@ -5476,7 +5479,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
-                                       let funding_txo = chan.get().get_funding_txo();
+                                       let funding_txo = chan.get().context.get_funding_txo();
                                        let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
                                        let res = if let Some(monitor_update) = monitor_update_opt {
                                                let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
@@ -5522,7 +5525,7 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               if !chan.get().is_usable() {
+                               if !chan.get().context.is_usable() {
                                        return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
                                }
 
@@ -5559,8 +5562,8 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(chan_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-                                       if chan.get().should_announce() {
+                               if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
+                                       if chan.get().context.should_announce() {
                                                // If the announcement is about a channel of ours which is public, some
                                                // other peer may simply be forwarding all its gossip to us. Don't provide
                                                // a scary-looking error message and return Ok instead.
@@ -5568,7 +5571,7 @@ where
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
-                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
                                let msg_from_node_one = msg.contents.flags & 1 == 0;
                                if were_node_one == msg_from_node_one {
                                        return Ok(NotifyOption::SkipPersist);
@@ -5609,18 +5612,18 @@ where
                                                        node_id: counterparty_node_id.clone(),
                                                        msg,
                                                });
-                                       } else if chan.get().is_usable() {
+                                       } else if chan.get().context.is_usable() {
                                                // If the channel is in a usable state (i.e. the channel is not being shut
                                                // down), send a unicast channel_update to our counterparty to make sure
                                                // they have the latest channel parameters.
                                                if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
                                                        channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               node_id: chan.get().context.get_counterparty_node_id(),
                                                                msg,
                                                        });
                                                }
                                        }
-                                       let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+                                       let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take();
                                        htlc_forwards = self.handle_channel_resumption(
                                                &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
                                                Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
@@ -5696,9 +5699,9 @@ where
                                                                        };
                                                                        self.issue_channel_close_events(&chan, reason);
                                                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                                               node_id: chan.get_counterparty_node_id(),
+                                                                               node_id: chan.context.get_counterparty_node_id(),
                                                                                action: msgs::ErrorAction::SendErrorMessage {
-                                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                                       msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
                                                                                },
                                                                        });
                                                                }
@@ -5747,8 +5750,8 @@ where
                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                        let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
                                        for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-                                               let counterparty_node_id = chan.get_counterparty_node_id();
-                                               let funding_txo = chan.get_funding_txo();
+                                               let counterparty_node_id = chan.context.get_counterparty_node_id();
+                                               let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
                                                        chan.maybe_free_holding_cell_htlcs(&self.logger);
                                                if !holding_cell_failed_htlcs.is_empty() {
@@ -5807,7 +5810,7 @@ where
                                                        if let Some(msg) = msg_opt {
                                                                has_update = true;
                                                                pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                                       node_id: chan.get_counterparty_node_id(), msg,
+                                                                       node_id: chan.context.get_counterparty_node_id(), msg,
                                                                });
                                                        }
                                                        if let Some(tx) = tx_opt {
@@ -5830,7 +5833,7 @@ where
                                                Err(e) => {
                                                        has_update = true;
                                                        let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                                       handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                                       handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
                                                        !close_channel
                                                }
                                        }
@@ -6161,7 +6164,7 @@ where
                                }
 
                                if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
-                                       debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+                                       debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                        if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
                                                log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                        log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
@@ -6417,7 +6420,7 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        for chan in peer_state.channel_by_id.values() {
-                               if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+                               if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
                                        res.push((funding_txo.txid, Some(block_hash)));
                                }
                        }
@@ -6429,7 +6432,7 @@ where
                let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
                        &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(None, |channel| {
-                       if let Some(funding_txo) = channel.get_funding_txo() {
+                       if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
                                        channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
                                } else { Ok((None, Vec::new(), None)) }
@@ -6472,20 +6475,20 @@ where
                                                for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                        let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                        timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
                                                }
                                                if let Some(channel_ready) = channel_ready_opt {
                                                        send_channel_ready!(self, pending_msg_events, channel, channel_ready);
-                                                       if channel.is_usable() {
-                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       if channel.context.is_usable() {
+                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
                                                                if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                                               node_id: channel.get_counterparty_node_id(),
+                                                                               node_id: channel.context.get_counterparty_node_id(),
                                                                                msg,
                                                                        });
                                                                }
                                                        } else {
-                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
                                                        }
                                                }
 
@@ -6495,9 +6498,9 @@ where
                                                }
 
                                                if let Some(announcement_sigs) = announcement_sigs {
-                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
                                                        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                               node_id: channel.get_counterparty_node_id(),
+                                                               node_id: channel.context.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
                                                        if let Some(height) = height_opt {
@@ -6512,7 +6515,7 @@ where
                                                        }
                                                }
                                                if channel.is_our_channel_ready() {
-                                                       if let Some(real_scid) = channel.get_short_channel_id() {
+                                                       if let Some(real_scid) = channel.context.get_short_channel_id() {
                                                                // If we sent a 0conf channel_ready, and now have an SCID, we add it
                                                                // to the short_to_chan_info map here. Note that we check whether we
                                                                // can relay using the real SCID at relay-time (i.e.
@@ -6520,8 +6523,8 @@ where
                                                                // un-confirmed we force-close the channel, ensuring short_to_chan_info
                                                                // is always consistent.
                                                                let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
-                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
                                                                        "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
                                                                        fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
                                                        }
@@ -6539,9 +6542,9 @@ where
                                                let reason_message = format!("{}", reason);
                                                self.issue_channel_close_events(channel, reason);
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: channel.get_counterparty_node_id(),
+                                                       node_id: channel.context.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                               channel_id: channel.channel_id(),
+                                                               channel_id: channel.context.channel_id(),
                                                                data: reason_message,
                                                        } },
                                                });
@@ -6907,8 +6910,8 @@ where
                        let peer_state = &mut *peer_state_lock;
                        let pending_msg_events = &mut peer_state.pending_msg_events;
                        peer_state.channel_by_id.retain(|_, chan| {
-                               let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                       if !chan.have_received_message() {
+                               let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
+                                       if !chan.context.have_received_message() {
                                                // If we created this (outbound) channel while we were disconnected from the
                                                // peer we probably failed to send the open_channel message, which is now
                                                // lost. We can't have had anything pending related to this channel, so we just
@@ -6916,13 +6919,13 @@ where
                                                false
                                        } else {
                                                pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                                       node_id: chan.get_counterparty_node_id(),
+                                                       node_id: chan.context.get_counterparty_node_id(),
                                                        msg: chan.get_channel_reestablish(&self.logger),
                                                });
                                                true
                                        }
                                } else { true };
-                               if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+                               if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
                                        if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
                                                if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
@@ -6987,6 +6990,10 @@ where
                provided_init_features(&self.default_configuration)
        }
 
+       fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+               Some(vec![ChainHash::from(&self.genesis_hash[..])])
+       }
+
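
`get_genesis_hashes` lets the message handler advertise which chains it operates on, matching the new `networks` field on `Init` seen in the tests below. A minimal sketch of how such lists might be compared, with a stand-in `ChainHash` type and a hypothetical helper (this is not LDK's actual connection-acceptance logic):

// Stand-in for bitcoin::blockdata::constants::ChainHash, for illustration only.
type ChainHash = [u8; 32];

// Hypothetical helper: a peer is acceptable if either side advertises no
// networks, or the two advertised lists share at least one chain.
fn chains_compatible(ours: Option<Vec<ChainHash>>, theirs: Option<Vec<ChainHash>>) -> bool {
    match (ours, theirs) {
        (Some(ours), Some(theirs)) => ours.iter().any(|c| theirs.contains(c)),
        // If either side advertises nothing, we cannot rule the peer out.
        _ => true,
    }
}

fn main() {
    let mainnet: ChainHash = [0x6f; 32]; // placeholder hash values
    let testnet: ChainHash = [0x43; 32];
    assert!(chains_compatible(Some(vec![mainnet]), Some(vec![mainnet, testnet])));
    assert!(!chains_compatible(Some(vec![mainnet]), Some(vec![testnet])));
    assert!(chains_compatible(Some(vec![mainnet]), None));
}
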
        fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
@@ -7136,10 +7143,9 @@ impl Writeable for ChannelDetails {
                        (14, user_channel_id_low, required),
                        (16, self.balance_msat, required),
                        (18, self.outbound_capacity_msat, required),
-                       // Note that by the time we get past the required read above, outbound_capacity_msat will be
-                       // filled in, so we can safely unwrap it here.
-                       (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+                       (19, self.next_outbound_htlc_limit_msat, required),
                        (20, self.inbound_capacity_msat, required),
+                       (21, self.next_outbound_htlc_minimum_msat, required),
                        (22, self.confirmations_required, option),
                        (24, self.force_close_spend_delay, option),
                        (26, self.is_outbound, required),
@@ -7176,6 +7182,7 @@ impl Readable for ChannelDetails {
                        // filled in, so we can safely unwrap it here.
                        (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
                        (20, inbound_capacity_msat, required),
+                       (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
                        (22, confirmations_required, option),
                        (24, force_close_spend_delay, option),
                        (26, is_outbound, required),
@@ -7209,6 +7216,7 @@ impl Readable for ChannelDetails {
                        balance_msat: balance_msat.0.unwrap(),
                        outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
                        next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+                       next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
                        inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
                        confirmations_required,
                        confirmations,
@@ -7245,6 +7253,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
                (0, payment_preimage, required),
                (2, incoming_cltv_expiry, required),
                (3, payment_metadata, option),
+               (4, payment_data, option), // Added in 0.0.116
        },
 ;);
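
The type numbers in these TLV streams follow the usual even/odd convention: a reader must fail on an unknown even type but may skip an unknown odd one. That is why `payment_data` at even type 4 breaks deserialization on pre-0.0.116 downgrades, while odd types such as 3 (`payment_metadata`) or 21 (`next_outbound_htlc_minimum_msat` above, defaulted to 0 when absent) stay downgrade-safe. A minimal self-contained sketch of the rule, with a hypothetical reader in place of the real TLV macros:

#[derive(Debug, PartialEq)]
enum DecodeError { UnknownRequiredFeature }

// Hypothetical reader: known types are parsed, unknown odd types are skipped
// ("it's OK to be odd"), and unknown even types are a hard error.
fn read_tlv_stream(record_types: &[u64], known: &[u64]) -> Result<(), DecodeError> {
    for typ in record_types {
        if known.contains(typ) { continue; }               // parse the field
        if typ % 2 == 1 { continue; }                      // unknown odd: skip
        return Err(DecodeError::UnknownRequiredFeature);   // unknown even: fail
    }
    Ok(())
}

fn main() {
    let old_known = [0u64, 2, 3]; // an older reader's known types
    // Odd type 21 from a newer writer is skipped harmlessly...
    assert!(read_tlv_stream(&[0, 21], &old_known).is_ok());
    // ...but even type 4 makes the older reader reject the object.
    assert_eq!(read_tlv_stream(&[0, 4], &old_known),
               Err(DecodeError::UnknownRequiredFeature));
}
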
 
@@ -7544,7 +7553,7 @@ where
                                }
                                number_of_channels += peer_state.channel_by_id.len();
                                for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if !channel.is_funding_initiated() {
+                                       if !channel.context.is_funding_initiated() {
                                                unfunded_channels += 1;
                                        }
                                }
@@ -7556,7 +7565,7 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if channel.is_funding_initiated() {
+                                       if channel.context.is_funding_initiated() {
                                                channel.write(writer)?;
                                        }
                                }
@@ -7842,7 +7851,7 @@ where
        pub default_config: UserConfig,
 
       /// A map from channel funding outpoints to ChannelMonitors for those channels (i.e.
-       /// value.get_funding_txo() should be the key).
+       /// value.context.get_funding_txo() should be the key).
        ///
        /// If a monitor is inconsistent with the channel state during deserialization the channel will
        /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
@@ -7932,14 +7941,14 @@ where
                        let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
-                       let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                       let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
                                if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
                                        // If the channel is ahead of the monitor, return InvalidValue:
                                        log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
+                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -7948,12 +7957,12 @@ where
                                } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                                channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
-                                               channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+                                               channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind the monitor, close the channel:
                                        log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
@@ -7962,8 +7971,8 @@ where
                                        }
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
-                                               channel_id: channel.channel_id(),
-                                               user_channel_id: channel.get_user_id(),
+                                               channel_id: channel.context.channel_id(),
+                                               user_channel_id: channel.context.get_user_id(),
                                                reason: ClosureReason::OutdatedChannelManager
                                        }, None));
                                        for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
@@ -7981,29 +7990,29 @@ where
                                                        // backwards leg of the HTLC will simply be rejected.
                                                        log_info!(args.logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
-                                                               log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
-                                                       failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+                                                               log_bytes!(channel.context.channel_id()), log_bytes!(payment_hash.0));
+                                                       failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                                }
                                        }
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
-                                               log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(),
+                                               log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
                                        channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
-                                       if let Some(short_channel_id) = channel.get_short_channel_id() {
-                                               short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                       if let Some(short_channel_id) = channel.context.get_short_channel_id() {
+                                               short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
-                                       if channel.is_funding_initiated() {
-                                               id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+                                       if channel.context.is_funding_initiated() {
+                                               id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
                                        }
-                                       match peer_channels.entry(channel.get_counterparty_node_id()) {
+                                       match peer_channels.entry(channel.context.get_counterparty_node_id()) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        let by_id_map = entry.get_mut();
-                                                       by_id_map.insert(channel.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), channel);
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        let mut by_id_map = HashMap::new();
-                                                       by_id_map.insert(channel.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), channel);
                                                        entry.insert(by_id_map);
                                                }
                                        }
@@ -8014,12 +8023,12 @@ where
                                // safely discard the channel.
                                let _ = channel.force_shutdown(false);
                                channel_closures.push_back((events::Event::ChannelClosed {
-                                       channel_id: channel.channel_id(),
-                                       user_channel_id: channel.get_user_id(),
+                                       channel_id: channel.context.channel_id(),
+                                       user_channel_id: channel.context.get_user_id(),
                                        reason: ClosureReason::DisconnectedPeer,
                                }, None));
                        } else {
-                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
                                log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
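
The branches above implement a reconciliation policy between each deserialized channel and its `ChannelMonitor`. A minimal sketch of that policy as a hypothetical classification helper (the real checks also compare commitment transaction numbers and revocation secrets, not just `update_id`s):

#[derive(Debug, PartialEq)]
enum MonitorReconciliation {
    // Channel and monitor agree; load the channel normally.
    Usable,
    // The monitor saw updates the manager missed (stale manager): force-close
    // and broadcast the monitor's latest commitment transaction.
    ForceClose,
    // The manager is ahead of the monitor: the chain::Watch durability
    // contract was violated, so refuse to deserialize rather than risk funds.
    Fatal,
}

fn reconcile(channel_update_id: u64, monitor_update_id: u64) -> MonitorReconciliation {
    if channel_update_id > monitor_update_id {
        MonitorReconciliation::Fatal
    } else if channel_update_id < monitor_update_id {
        MonitorReconciliation::ForceClose
    } else {
        MonitorReconciliation::Usable
    }
}

fn main() {
    assert_eq!(reconcile(5, 5), MonitorReconciliation::Usable);
    assert_eq!(reconcile(3, 5), MonitorReconciliation::ForceClose);
    assert_eq!(reconcile(7, 5), MonitorReconciliation::Fatal);
}
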
@@ -8108,7 +8117,7 @@ where
                        let peer_state = peer_mtx.lock().unwrap();
                        for (_, chan) in peer_state.channel_by_id.iter() {
                                for update in chan.uncompleted_unblocked_mon_updates() {
-                                       if let Some(funding_txo) = chan.get_funding_txo() {
+                                       if let Some(funding_txo) = chan.context.get_funding_txo() {
                                                log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
                                                        update.update_id, log_bytes!(funding_txo.to_channel_id()));
                                                pending_background_events.push(
@@ -8400,25 +8409,25 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
-                               if chan.outbound_scid_alias() == 0 {
+                               if chan.context.outbound_scid_alias() == 0 {
                                        let mut outbound_scid_alias;
                                        loop {
                                                outbound_scid_alias = fake_scid::Namespace::OutboundAlias
                                                        .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
                                                if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
                                        }
-                                       chan.set_outbound_scid_alias(outbound_scid_alias);
-                               } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+                                       chan.context.set_outbound_scid_alias(outbound_scid_alias);
+                               } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
                                        // Note that in rare cases it's possible to hit this while reading an older
                                        // channel if we just happened to pick a colliding outbound alias above.
-                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                        return Err(DecodeError::InvalidValue);
                                }
-                               if chan.is_usable() {
-                                       if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+                               if chan.context.is_usable() {
+                                       if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
                                                // Note that in rare cases it's possible to hit this while reading an older
                                                // channel if we just happened to pick a colliding outbound alias above.
-                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                return Err(DecodeError::InvalidValue);
                                        }
                                }
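
The loop above keeps drawing fake SCIDs until one is globally unique across all channels. A minimal sketch of that collision-avoiding assignment, with a hypothetical deterministic generator standing in for `fake_scid::Namespace::get_fake_scid`:

use std::collections::HashSet;

// Hypothetical generator (a simple LCG), for illustration only.
fn gen_fake_scid(state: &mut u64) -> u64 {
    *state = state.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
    *state
}

fn main() {
    let mut rng_state = 0x5eed_u64;
    let mut outbound_scid_aliases: HashSet<u64> = HashSet::new();

    for _ in 0..1000 {
        // Draw candidates until one is unique, mirroring the loop above;
        // an alias of 0 means "not yet assigned" and is never handed out.
        let alias = loop {
            let candidate = gen_fake_scid(&mut rng_state);
            if candidate != 0 && outbound_scid_aliases.insert(candidate) { break candidate; }
        };
        assert_ne!(alias, 0);
    }
    assert_eq!(outbound_scid_aliases.len(), 1000);
}
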
@@ -8578,7 +8587,7 @@ mod tests {
        use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
        use crate::util::errors::APIError;
        use crate::util::test_utils;
-       use crate::util::config::ChannelConfig;
+       use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
        use crate::sign::EntropySource;
 
        #[test]
@@ -8787,13 +8796,26 @@ mod tests {
 
        #[test]
        fn test_keysend_dup_payment_hash() {
+               do_test_keysend_dup_payment_hash(false);
+               do_test_keysend_dup_payment_hash(true);
+       }
+
+       fn do_test_keysend_dup_payment_hash(accept_mpp_keysend: bool) {
                // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
                //      outbound regular payment fails as expected.
                // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
                //      fails as expected.
+               // (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
+               //      payment fails as expected. When `accept_mpp_keysend` is false, this tests that we
+               //      reject MPP keysend payments, since in this case, where the payment has no payment
+               //      secret, a keysend payment with a duplicate hash is effectively an MPP keysend. If
+               //      `accept_mpp_keysend` is true, this tests that we only accept MPP keysends carrying
+               //      payment secrets and reject them otherwise.
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let mut mpp_keysend_cfg = test_default_channel_config();
+               mpp_keysend_cfg.accept_mpp_keysend = accept_mpp_keysend;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(mpp_keysend_cfg)]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                create_announced_chan_between_nodes(&nodes, 0, 1);
                let scorer = test_utils::TestScorer::new();
@@ -8805,7 +8827,7 @@ mod tests {
 
                // Next, attempt a keysend payment and make sure it fails.
                let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
+                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
                        final_value_msat: 100_000,
                };
                let route = find_route(
@@ -8882,6 +8904,53 @@ mod tests {
 
                // Finally, succeed the keysend payment.
                claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+               // To start (3), send a keysend payment but don't claim it.
+               let payment_id_1 = PaymentId([44; 32]);
+               let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+                       RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let event = events.pop().unwrap();
+               let path = vec![&nodes[1]];
+               pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+               // Next, attempt a second keysend payment with the same payment hash and make sure it fails.
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+                       final_value_msat: 100_000,
+               };
+               let route = find_route(
+                       &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+               ).unwrap();
+               let payment_id_2 = PaymentId([45; 32]);
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+                       RecipientOnionFields::spontaneous_empty(), payment_id_2).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, claim the original payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
        }
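
Part (3) of the test pins down the acceptance policy for an incoming keysend HTLC whose payment hash already has pending HTLCs (which is exactly what one part of an MPP keysend looks like). A minimal sketch of that policy as a hypothetical helper, not LDK's actual receive path:

// Hypothetical policy check for an MPP keysend part.
fn accept_mpp_keysend_part(has_payment_secret: bool, accept_mpp_keysend: bool) -> bool {
    // With accept_mpp_keysend unset we reject all MPP keysend parts; with it
    // set we still require a payment secret to correlate the parts safely.
    accept_mpp_keysend && has_payment_secret
}

fn main() {
    assert!(!accept_mpp_keysend_part(false, false)); // duplicate-hash keysend, rejected
    assert!(!accept_mpp_keysend_part(false, true));  // no payment secret, rejected
    assert!(accept_mpp_keysend_part(true, true));    // secret-carrying MPP keysend, accepted
}
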
 
        #[test]
@@ -8898,7 +8967,7 @@ mod tests {
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
                let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
                        final_value_msat: 10_000,
                };
                let network_graph = nodes[0].network_graph.clone();
@@ -8931,10 +9000,13 @@ mod tests {
 
        #[test]
        fn test_keysend_msg_with_secret_err() {
-               // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+               // Test that we error as expected if we receive a keysend payment that includes a payment
+               // secret when we don't support MPP keysend.
+               let mut reject_mpp_keysend_cfg = test_default_channel_config();
+               reject_mpp_keysend_cfg.accept_mpp_keysend = false;
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(reject_mpp_keysend_cfg)]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
@@ -8942,7 +9014,7 @@ mod tests {
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
                let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
                        final_value_msat: 10_000,
                };
                let network_graph = nodes[0].network_graph.clone();
@@ -9297,12 +9369,14 @@ mod tests {
                                &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                        peer_pks.push(random_pk);
                        nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+                       }, true).unwrap();
                }
                let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
                        &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap_err();
 
                // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
                // them if we have too many un-channel'd peers.
@@ -9313,13 +9387,16 @@ mod tests {
                        if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
                }
                nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap_err();
 
                // but of course if the connection is outbound it's allowed...
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
                // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
@@ -9343,7 +9420,8 @@ mod tests {
                // "protected" and can connect again.
                mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
                // Further, because the first channel was funded, we can open another channel with
@@ -9408,7 +9486,8 @@ mod tests {
                        let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
                                &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                        nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+                       }, true).unwrap();
 
                        nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
                        let events = nodes[1].node.get_and_clear_pending_events();
@@ -9426,7 +9505,8 @@ mod tests {
                let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
                        &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                let events = nodes[1].node.get_and_clear_pending_events();
                match events[0] {
@@ -9489,6 +9569,62 @@ mod tests {
 
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        }
+
+       #[test]
+       fn test_update_channel_config() {
+               let chanmon_cfg = create_chanmon_cfgs(2);
+               let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+               let mut user_config = test_default_channel_config();
+               let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+               let nodes = create_network(2, &node_cfg, &node_chanmgr);
+               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+               let channel = &nodes[0].node.list_channels()[0];
+
+               nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 0);
+
+               user_config.channel_config.forwarding_fee_base_msat += 10;
+               nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               match &events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("expected BroadcastChannelUpdate event"),
+               }
+
+               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 0);
+
+               let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
+               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+                       cltv_expiry_delta: Some(new_cltv_expiry_delta),
+                       ..Default::default()
+               }).unwrap();
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               match &events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("expected BroadcastChannelUpdate event"),
+               }
+
+               let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
+               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+                       forwarding_fee_proportional_millionths: Some(new_fee),
+                       ..Default::default()
+               }).unwrap();
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               match &events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("expected BroadcastChannelUpdate event"),
+               }
+       }
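
As a usage sketch of the API this test exercises: a caller that only wants to change one setting can set just that field and leave the rest `None`, and the channel's other config values are preserved. The names below (`manager`, `peer_node_id`, `channel_ids`) and the fee value are hypothetical, assumed to be in scope at the call site:

use crate::util::config::ChannelConfigUpdate;

// Bump only the proportional forwarding fee; fields left as None keep their
// current values rather than being reset to defaults.
let fee_only_update = ChannelConfigUpdate {
    forwarding_fee_proportional_millionths: Some(150),
    ..Default::default()
};
manager.update_partial_channel_config(&peer_node_id, &channel_ids, &fee_only_update).unwrap();
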
 }
 
 #[cfg(ldk_bench)]
@@ -9570,8 +9706,12 @@ pub mod bench {
                });
                let node_b_holder = ANodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+                       features: node_b.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+                       features: node_a.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));