Set UpdateAddHTLC::skimmed_fee_msat on forward
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d427e35cc526cd7078cf6df9ee2cc69c42a34eef..9d656d02286f57a5b85b42593b4ac01a397871fd 100644
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
@@ -131,6 +131,9 @@ pub(super) struct PendingHTLCInfo {
        /// may overshoot this in either case)
        pub(super) outgoing_amt_msat: u64,
        pub(super) outgoing_cltv_value: u32,
+       /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
+       /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+       pub(super) skimmed_fee_msat: Option<u64>,
 }
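
The new field is easiest to see as simple accounting. A minimal sketch (hypothetical helper, not LDK API) of what a forwarding node that skims an extra fee actually sends onward:

```rust
/// Hypothetical helper (not part of LDK): the amount actually forwarded when
/// a node skims `skimmed_fee_msat` off the top of an HTLC.
fn forwarded_amt_msat(onion_amt_to_forward: u64, skimmed_fee_msat: Option<u64>) -> u64 {
    // The onion instructed us to forward `onion_amt_to_forward`; skimming
    // keeps part of that for ourselves, so the outgoing HTLC is smaller.
    onion_amt_to_forward.saturating_sub(skimmed_fee_msat.unwrap_or(0))
}

fn main() {
    assert_eq!(forwarded_amt_msat(100_000, Some(1_000)), 99_000);
    assert_eq!(forwarded_amt_msat(100_000, None), 100_000);
}
```
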
 
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
@@ -210,6 +213,8 @@ struct ClaimableHTLC {
        total_value_received: Option<u64>,
        /// The sender intended sum total of all MPP parts specified in the onion
        total_msat: u64,
+       /// The extra fee our counterparty skimmed off the top of this HTLC.
+       counterparty_skimmed_fee_msat: Option<u64>,
 }
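
On the receive side the skim shows up as an apparent shortfall. A minimal sketch (toy `Part` type; names are hypothetical) of how a recipient could reconcile MPP parts against the sender's intended total once the recorded skims are added back:

```rust
/// Hypothetical MPP part: the amount we received plus any fee the
/// counterparty skimmed before forwarding it to us.
struct Part { value_msat: u64, counterparty_skimmed_fee_msat: Option<u64> }

/// Returns true if the parts cover `total_msat` once the recorded skims are
/// added back, i.e. the only shortfall is the skimmed fees.
fn covers_total(parts: &[Part], total_msat: u64) -> bool {
    let received: u64 = parts.iter().map(|p| p.value_msat).sum();
    let skimmed: u64 = parts.iter()
        .map(|p| p.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
    received + skimmed >= total_msat
}

fn main() {
    let parts = [
        Part { value_msat: 49_000, counterparty_skimmed_fee_msat: Some(1_000) },
        Part { value_msat: 50_000, counterparty_skimmed_fee_msat: None },
    ];
    assert!(covers_total(&parts, 100_000));
}
```
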
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -607,12 +612,22 @@ impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
 
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
-       /// `temporary_channel_id` or `channel_id` -> `channel`.
+       /// `channel_id` -> `Channel`.
        ///
-       /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
-       /// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the
-       /// `channel_id`.
+       /// Holds all funded channels where the peer is the counterparty.
        pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
+       /// `temporary_channel_id` -> `OutboundV1Channel`.
+       ///
+       /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
+       /// been assigned a `channel_id`, the entry in this map is removed and one is created in
+       /// `channel_by_id`.
+       pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<Signer>>,
+       /// `temporary_channel_id` -> `InboundV1Channel`.
+       ///
+       /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
+       /// been assigned a `channel_id`, the entry in this map is removed and one is created in
+       /// `channel_by_id`.
+       pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<Signer>>,
        /// The latest `InitFeatures` we heard from the peer.
        latest_features: InitFeatures,
        /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
@@ -654,6 +669,20 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
                }
                self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
        }
+
+       // Returns a count of all channels we have with this peer, including pending channels.
+       fn total_channel_count(&self) -> usize {
+               self.channel_by_id.len() +
+                       self.outbound_v1_channel_by_id.len() +
+                       self.inbound_v1_channel_by_id.len()
+       }
+
+       // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
+       fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
+               self.channel_by_id.contains_key(channel_id) ||
+                       self.outbound_v1_channel_by_id.contains_key(channel_id) ||
+                       self.inbound_v1_channel_by_id.contains_key(channel_id)
+       }
 }
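
A minimal sketch (toy types; `PeerStateSketch` and `promote_outbound` are hypothetical, not LDK API) of the invariant these three maps maintain: each channel lives in exactly one map, and a pending V1 channel is re-keyed into `channel_by_id` once funded:

```rust
use std::collections::HashMap;

struct PeerStateSketch {
    channel_by_id: HashMap<[u8; 32], &'static str>,
    outbound_v1_channel_by_id: HashMap<[u8; 32], &'static str>,
    inbound_v1_channel_by_id: HashMap<[u8; 32], &'static str>,
}

impl PeerStateSketch {
    // Mirrors `total_channel_count`: every channel is in exactly one map.
    fn total_channel_count(&self) -> usize {
        self.channel_by_id.len()
            + self.outbound_v1_channel_by_id.len()
            + self.inbound_v1_channel_by_id.len()
    }

    // On funding, a pending outbound channel moves to the funded map under
    // its final `channel_id`.
    fn promote_outbound(&mut self, temp_id: [u8; 32], channel_id: [u8; 32]) {
        if let Some(chan) = self.outbound_v1_channel_by_id.remove(&temp_id) {
            self.channel_by_id.insert(channel_id, chan);
        }
    }
}

fn main() {
    let mut st = PeerStateSketch {
        channel_by_id: HashMap::new(),
        outbound_v1_channel_by_id: HashMap::new(),
        inbound_v1_channel_by_id: HashMap::new(),
    };
    st.outbound_v1_channel_by_id.insert([1; 32], "pending");
    assert_eq!(st.total_channel_count(), 1);
    st.promote_outbound([1; 32], [2; 32]);
    assert_eq!(st.total_channel_count(), 1);
    assert!(st.channel_by_id.contains_key(&[2; 32]));
}
```
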
 
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
@@ -1610,14 +1639,23 @@ macro_rules! handle_error {
                                Err(err)
                        },
                }
-       } }
+       } };
+       ($self: ident, $internal: expr) => {
+               match $internal {
+                       Ok(res) => Ok(res),
+                       Err((chan, msg_handle_err)) => {
+                               let counterparty_node_id = chan.get_counterparty_node_id();
+                               handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err))
+                       },
+               }
+       };
 }
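
The new arm exists because operations on not-yet-funded channels consume the channel and must hand it back on failure. A minimal sketch (stub types, hypothetical names) of that `Err((chan, err))` shape:

```rust
struct PendingChannel { counterparty_node_id: [u8; 33] }
#[derive(Debug)]
struct HandleErr(&'static str);

// A fallible operation that consumes the channel returns it back on failure,
// mirroring `Err((chan, msg_handle_err))` in the new macro arm.
fn fallible_op(chan: PendingChannel, fail: bool) -> Result<(), (PendingChannel, HandleErr)> {
    if fail { Err((chan, HandleErr("signer refused"))) } else { Ok(()) }
}

fn main() {
    let chan = PendingChannel { counterparty_node_id: [2; 33] };
    if let Err((chan, err)) = fallible_op(chan, true) {
        // The caller still owns `chan` here, so it can read the counterparty
        // before dropping it, which is what the macro's second arm enables.
        println!("failed with {:?} for peer {:?}", err, &chan.counterparty_node_id[..4]);
    }
}
```
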
 
 macro_rules! update_maps_on_chan_removal {
-       ($self: expr, $channel: expr) => {{
-               $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id());
+       ($self: expr, $channel_context: expr) => {{
+               $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
-               if let Some(short_id) = $channel.context.get_short_channel_id() {
+               if let Some(short_id) = $channel_context.get_short_channel_id() {
                        short_to_chan_info.remove(&short_id);
                } else {
                        // If the channel was never confirmed on-chain prior to its closure, remove the
@@ -1626,10 +1664,10 @@ macro_rules! update_maps_on_chan_removal {
                        // also don't want a counterparty to be able to trivially cause a memory leak by simply
                        // opening a million channels with us which are closed before we ever reach the funding
                        // stage.
-                       let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias());
+                       let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
                        debug_assert!(alias_removed);
                }
-               short_to_chan_info.remove(&$channel.context.outbound_scid_alias());
+               short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
        }}
 }
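
The macro now takes a `&ChannelContext` because funded and prefunded channel types share one context. A minimal sketch (toy types) of why the cleanup helper only needs the shared context:

```rust
use std::collections::HashMap;

struct ChannelContext { channel_id: [u8; 32] }
struct Channel { context: ChannelContext }           // funded
struct OutboundV1Channel { context: ChannelContext } // prefunded

// Map cleanup only needs the shared context, so every channel phase can use it.
fn remove_from_maps(ctx: &ChannelContext, id_to_peer: &mut HashMap<[u8; 32], ()>) {
    id_to_peer.remove(&ctx.channel_id);
}

fn main() {
    let mut id_to_peer = HashMap::new();
    id_to_peer.insert([7; 32], ());
    let funded = Channel { context: ChannelContext { channel_id: [7; 32] } };
    remove_from_maps(&funded.context, &mut id_to_peer);
    let pending = OutboundV1Channel { context: ChannelContext { channel_id: [8; 32] } };
    remove_from_maps(&pending.context, &mut id_to_peer);
    assert!(id_to_peer.is_empty());
}
```
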
 
@@ -1645,12 +1683,25 @@ macro_rules! convert_chan_err {
                        },
                        ChannelError::Close(msg) => {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, $channel);
-                               let shutdown_res = $channel.force_shutdown(true);
+                               update_maps_on_chan_removal!($self, &$channel.context);
+                               let shutdown_res = $channel.context.force_shutdown(true);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                }
+       };
+       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => {
+               match $err {
+                       // We should only ever have `ChannelError::Close` when prefunded channels error.
+                       // In any case, just close the channel.
+                       ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
+                               log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+                               update_maps_on_chan_removal!($self, &$channel_context);
+                               let shutdown_res = $channel_context.force_shutdown(false);
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
+                                       shutdown_res, None))
+                       },
+               }
        }
 }
 
@@ -1669,6 +1720,21 @@ macro_rules! break_chan_entry {
        }
 }
 
+macro_rules! try_v1_outbound_chan_entry {
+       ($self: ident, $res: expr, $entry: expr) => {
+               match $res {
+                       Ok(res) => res,
+                       Err(e) => {
+                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED);
+                               if drop {
+                                       $entry.remove_entry();
+                               }
+                               return Err(res);
+                       }
+               }
+       }
+}
+
 macro_rules! try_chan_entry {
        ($self: ident, $res: expr, $entry: expr) => {
                match $res {
@@ -1688,7 +1754,7 @@ macro_rules! remove_channel {
        ($self: expr, $entry: expr) => {
                {
                        let channel = $entry.remove_entry().1;
-                       update_maps_on_chan_removal!($self, channel);
+                       update_maps_on_chan_removal!($self, &channel.context);
                        channel
                }
        }
@@ -1810,10 +1876,10 @@ macro_rules! handle_new_monitor_update {
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
                                        log_bytes!($chan.context.channel_id()[..]));
-                               update_maps_on_chan_removal!($self, $chan);
+                               update_maps_on_chan_removal!($self, &$chan.context);
                                let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
                                        "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
-                                       $chan.context.get_user_id(), $chan.force_shutdown(false),
+                                       $chan.context.get_user_id(), $chan.context.force_shutdown(false),
                                        $self.get_channel_update_for_broadcast(&$chan).ok()));
                                $remove;
                                res
@@ -2044,7 +2110,7 @@ where
                        let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
                        let their_features = &peer_state.latest_features;
                        let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-                       match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
+                       match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
                                their_features, channel_value_satoshis, push_msat, user_channel_id, config,
                                self.best_block.read().unwrap().height(), outbound_scid_alias)
                        {
@@ -2058,7 +2124,7 @@ where
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
                let temporary_channel_id = channel.context.channel_id();
-               match peer_state.channel_by_id.entry(temporary_channel_id) {
+               match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(fuzzing) {
                                        return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -2076,7 +2142,7 @@ where
                Ok(temporary_channel_id)
        }
 
-       fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+       fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
                // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
@@ -2103,7 +2169,37 @@ where
        /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
        /// more information.
        pub fn list_channels(&self) -> Vec<ChannelDetails> {
-               self.list_channels_with_filter(|_| true)
+               // Allocate our best estimate of the number of channels we have in the `res`
+               // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
+               // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+               // of the ChannelMonitor handling. Therefore reallocations may still occur, but they are
+               // unlikely as the `short_to_chan_info` map often contains 2 entries for
+               // the same channel.
+               let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
+               {
+                       let best_block_height = self.best_block.read().unwrap().height();
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               for (_channel_id, channel) in peer_state.channel_by_id.iter() {
+                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                               peer_state.latest_features.clone());
+                                       res.push(details);
+                               }
+                               for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
+                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                               peer_state.latest_features.clone());
+                                       res.push(details);
+                               }
+                               for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
+                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                               peer_state.latest_features.clone());
+                                       res.push(details);
+                               }
+                       }
+               }
+               res
        }
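
`list_channels` now walks all three per-peer maps. As a design note, the same aggregation can be written as one iterator chain; a minimal sketch with toy types (`Details` and `list_all` are hypothetical):

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Details(&'static str);

// Chain the funded and both pending maps into one details list, like the
// three loops above do per peer.
fn list_all(
    funded: &HashMap<[u8; 32], Details>,
    outbound: &HashMap<[u8; 32], Details>,
    inbound: &HashMap<[u8; 32], Details>,
) -> Vec<Details> {
    funded.values()
        .chain(outbound.values())
        .chain(inbound.values())
        .cloned()
        .collect()
}

fn main() {
    let mut funded = HashMap::new();
    funded.insert([1; 32], Details("funded"));
    let mut outbound = HashMap::new();
    outbound.insert([2; 32], Details("pending-out"));
    let inbound = HashMap::new();
    assert_eq!(list_all(&funded, &outbound, &inbound).len(), 2);
}
```
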
 
        /// Gets the list of usable channels, in random order. Useful as an argument to
@@ -2116,7 +2212,7 @@ where
                // Note we use is_live here instead of usable which leads to somewhat confused
                // internal/external nomenclature, but that's ok cause that's probably what the user
                // really wanted anyway.
-               self.list_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
+               self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
        }
 
        /// Gets the list of channels we have with a given counterparty, in random order.
@@ -2166,19 +2262,19 @@ where
        }
 
        /// Helper function that issues the channel close events
-       fn issue_channel_close_events(&self, channel: &Channel<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
+       fn issue_channel_close_events(&self, context: &ChannelContext<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
                let mut pending_events_lock = self.pending_events.lock().unwrap();
-               match channel.unbroadcasted_funding() {
+               match context.unbroadcasted_funding() {
                        Some(transaction) => {
                                pending_events_lock.push_back((events::Event::DiscardFunding {
-                                       channel_id: channel.context.channel_id(), transaction
+                                       channel_id: context.channel_id(), transaction
                                }, None));
                        },
                        None => {},
                }
                pending_events_lock.push_back((events::Event::ChannelClosed {
-                       channel_id: channel.context.channel_id(),
-                       user_channel_id: channel.context.get_user_id(),
+                       channel_id: context.channel_id(),
+                       user_channel_id: context.get_user_id(),
                        reason: closure_reason
                }, None));
        }
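
A minimal sketch (toy event enum) of the ordering this helper produces: a `DiscardFunding` event for an unbroadcast funding transaction, then the `ChannelClosed` event:

```rust
#[derive(Debug, PartialEq)]
enum Ev { DiscardFunding, ChannelClosed }

// Toy version of the event sequence: DiscardFunding (if any) is pushed
// first, then ChannelClosed, matching the push order in the function above.
fn close_events(unbroadcasted_funding: bool) -> Vec<Ev> {
    let mut evs = Vec::new();
    if unbroadcasted_funding { evs.push(Ev::DiscardFunding); }
    evs.push(Ev::ChannelClosed);
    evs
}

fn main() {
    assert_eq!(close_events(true), vec![Ev::DiscardFunding, Ev::ChannelClosed]);
    assert_eq!(close_events(false), vec![Ev::ChannelClosed]);
}
```
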
@@ -2225,7 +2321,7 @@ where
                                                                msg: channel_update
                                                        });
                                                }
-                                               self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+                                               self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
                                        }
                                        break Ok(());
                                },
@@ -2330,30 +2426,46 @@ where
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(peer_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
-               let mut chan = {
+               let (update_opt, counterparty_node_id) = {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
+                       let closure_reason = if let Some(peer_msg) = peer_msg {
+                               ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
+                       } else {
+                               ClosureReason::HolderForceClosed
+                       };
                        if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                               if let Some(peer_msg) = peer_msg {
-                                       self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) });
-                               } else {
-                                       self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
-                               }
-                               remove_channel!(self, chan)
+                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               self.issue_channel_close_events(&chan.get().context, closure_reason);
+                               let mut chan = remove_channel!(self, chan);
+                               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+                               (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
+                       } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
+                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               self.issue_channel_close_events(&chan.get().context, closure_reason);
+                               let mut chan = remove_channel!(self, chan);
+                               self.finish_force_close_channel(chan.context.force_shutdown(false));
+                               // Prefunded channel has no update
+                               (None, chan.context.get_counterparty_node_id())
+                       } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
+                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               self.issue_channel_close_events(&chan.get().context, closure_reason);
+                               let mut chan = remove_channel!(self, chan);
+                               self.finish_force_close_channel(chan.context.force_shutdown(false));
+                               // Prefunded channel has no update
+                               (None, chan.context.get_counterparty_node_id())
                        } else {
                                return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
                        }
                };
-               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-               self.finish_force_close_channel(chan.force_shutdown(broadcast));
-               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+               if let Some(update) = update_opt {
                        let mut peer_state = peer_state_mutex.lock().unwrap();
                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
                        });
                }
 
-               Ok(chan.context.get_counterparty_node_id())
+               Ok(counterparty_node_id)
        }
 
        fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
@@ -2414,9 +2526,10 @@ where
                }
        }
 
-       fn construct_recv_pending_htlc_info(&self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32],
-               payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result<PendingHTLCInfo, ReceiveError>
-       {
+       fn construct_recv_pending_htlc_info(
+               &self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], payment_hash: PaymentHash,
+               amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool
+       ) -> Result<PendingHTLCInfo, ReceiveError> {
                // final_incorrect_cltv_expiry
                if hop_data.outgoing_cltv_value > cltv_expiry {
                        return Err(ReceiveError {
@@ -2442,7 +2555,7 @@ where
                                msg: "The final CLTV expiry is too soon to handle",
                        });
                }
-               if hop_data.amt_to_forward > amt_msat {
+               if !allow_underpay && hop_data.amt_to_forward > amt_msat {
                        return Err(ReceiveError {
                                err_code: 19,
                                err_data: amt_msat.to_be_bytes().to_vec(),
@@ -2509,15 +2622,18 @@ where
                        incoming_amt_msat: Some(amt_msat),
                        outgoing_amt_msat: hop_data.amt_to_forward,
                        outgoing_cltv_value: hop_data.outgoing_cltv_value,
+                       skimmed_fee_msat: None,
                })
        }
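
A minimal sketch isolating the amount check gated by the new `allow_underpay` flag: normally a final hop must deliver at least the onion's `amt_to_forward`, but when fees may have been skimmed upstream the shortfall is tolerated here and reconciled against the recorded skim later. Error code 19 is BOLT 4's `final_incorrect_htlc_amount`.

```rust
/// Toy version of the final-hop amount check with the `allow_underpay` knob.
fn check_final_amount(amt_to_forward: u64, amt_msat: u64, allow_underpay: bool) -> Result<(), u16> {
    if !allow_underpay && amt_to_forward > amt_msat {
        return Err(19); // final_incorrect_htlc_amount
    }
    Ok(())
}

fn main() {
    assert_eq!(check_final_amount(100_000, 99_000, false), Err(19));
    assert_eq!(check_final_amount(100_000, 99_000, true), Ok(()));
}
```
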
 
-       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
+       fn decode_update_add_htlc_onion(
+               &self, msg: &msgs::UpdateAddHTLC
+       ) -> Result<(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg> {
                macro_rules! return_malformed_err {
                        ($msg: expr, $err_code: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
@@ -2548,7 +2664,7 @@ where
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                reason: HTLCFailReason::reason($err_code, $data.to_vec())
@@ -2567,11 +2683,186 @@ where
                                return_err!(err_msg, err_code, &[0; 0]);
                        },
                };
+               let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop {
+                       onion_utils::Hop::Forward {
+                               next_hop_data: msgs::OnionHopData {
+                                       format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
+                                       outgoing_cltv_value,
+                               }, ..
+                       } => {
+                               let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+                                       msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+                               (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+                       },
+               // We'll do receive checks in [`Self::construct_pending_htlc_status`] so we have access to the
+                       // inbound channel's state.
+                       onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
+                       onion_utils::Hop::Forward {
+                               next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, ..
+                       } => {
+                               return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+                       }
+               };
+
+               // Perform outbound checks here instead of in [`Self::construct_pending_htlc_status`] because we
+               // can't hold the outbound peer state lock at the same time as the inbound peer state lock.
+               if let Some((err, mut code, chan_update)) = loop {
+                       let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
+                       let forwarding_chan_info_opt = match id_option {
+                               None => { // unknown_next_peer
+                                       // Note that this is likely a timing oracle for detecting whether an scid is a
+                                       // phantom or an intercept.
+                                       if (self.default_configuration.accept_intercept_htlcs &&
+                                               fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
+                                               fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+                                       {
+                                               None
+                                       } else {
+                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                       }
+                               },
+                               Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
+                       };
+                       let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+                               let per_peer_state = self.per_peer_state.read().unwrap();
+                               let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+                               if peer_state_mutex_opt.is_none() {
+                                       break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                               }
+                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+                                       None => {
+                                               // Channel was removed. The short_to_chan_info and channel_by_id maps
+                                               // have no consistency guarantees.
+                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                                       },
+                                       Some(chan) => chan
+                               };
+                               if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+                                       // Note that the behavior here should be identical to the above block - we
+                                       // should NOT reveal the existence or non-existence of a private channel if
+                                       // we don't allow forwards outbound over them.
+                                       break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+                               }
+                               if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
+                                       // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+                                       // "refuse to forward unless the SCID alias was used", so we pretend
+                                       // we don't have the channel here.
+                                       break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+                               }
+                               let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
+
+                               // Note that we could technically not return an error yet here and just hope
+                               // that the connection is reestablished or monitor updated by the time we get
+                               // around to doing the actual forward, but better to fail early if we can and
+                               // hopefully an attacker trying to path-trace payments cannot make this occur
+                               // on a small/per-node/per-channel scale.
+                               if !chan.context.is_live() { // channel_disabled
+                                       // If the channel_update we're going to return is disabled (i.e. the
+                                       // peer has been disabled for some time), return `channel_disabled`,
+                                       // otherwise return `temporary_channel_failure`.
+                                       if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+                                               break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+                                       } else {
+                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+                                       }
+                               }
+                               if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+                                       break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+                               }
+                               if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
+                                       break Some((err, code, chan_update_opt));
+                               }
+                               chan_update_opt
+                       } else {
+                               if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+                                       // We really should set `incorrect_cltv_expiry` here but as we're not
+                                       // forwarding over a real channel we can't generate a channel_update
+                                       // for it. Instead we just return a generic temporary_node_failure.
+                                       break Some((
+                                                       "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+                                                       0x2000 | 2, None,
+                                       ));
+                               }
+                               None
+                       };
+
+                       let cur_height = self.best_block.read().unwrap().height() + 1;
+                       // Theoretically, channel counterparty shouldn't send us an HTLC expiring now,
+                       // but we want to be robust wrt counterparty packet sanitization (see
+                       // HTLC_FAIL_BACK_BUFFER rationale).
+                       if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+                               break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
+                       }
+                       if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+                               break Some(("CLTV expiry is too far in the future", 21, None));
+                       }
+                       // If the HTLC expires ~now, don't bother trying to forward it to our
+                       // counterparty. They should fail it anyway, but we don't want to bother with
+                       // the round-trips or risk them deciding they definitely want the HTLC and
+                       // force-closing to ensure they get it if we're offline.
+                       // We previously had a much more aggressive check here which tried to ensure
+                       // our counterparty receives an HTLC which has *our* risk threshold met on it,
+                       // but there is no need to do that, and since we're a bit conservative with our
+                       // risk threshold it just results in failing to forward payments.
+                       if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
+                       }
+
+                       break None;
+               }
+               {
+                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+                       if let Some(chan_update) = chan_update {
+                               if code == 0x1000 | 11 || code == 0x1000 | 12 {
+                                       msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+                               }
+                               else if code == 0x1000 | 13 {
+                                       msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+                               }
+                               else if code == 0x1000 | 20 {
+                                       // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+                                       0u16.write(&mut res).expect("Writes cannot fail");
+                               }
+                               (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+                               msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+                               chan_update.write(&mut res).expect("Writes cannot fail");
+                       } else if code & 0x1000 == 0x1000 {
+                               // If we're trying to return an error that requires a `channel_update` but
+                               // we're forwarding to a phantom or intercept "channel" (i.e. cannot
+                               // generate an update), just use the generic "temporary_node_failure"
+                               // instead.
+                               code = 0x2000 | 2;
+                       }
+                       return_err!(err, code, &res.0[..]);
+               }
+               Ok((next_hop, shared_secret, next_packet_pk_opt))
+       }
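
The failure codes used throughout this function are BOLT 4 flag-plus-ordinal compositions (0x8000 BADONION, 0x4000 PERM, 0x2000 NODE, 0x1000 UPDATE). A short sketch of the ones above, plus the fallback applied when an UPDATE-flagged failure cannot carry a `channel_update` (phantom or intercept SCIDs):

```rust
const UPDATE: u16 = 0x1000;
const NODE: u16 = 0x2000;
const PERM: u16 = 0x4000;

const UNKNOWN_NEXT_PEER: u16 = PERM | 10;      // 0x4000 | 10
const AMOUNT_BELOW_MINIMUM: u16 = UPDATE | 11; // 0x1000 | 11
const EXPIRY_TOO_SOON: u16 = UPDATE | 14;      // 0x1000 | 14
const CHANNEL_DISABLED: u16 = UPDATE | 20;     // 0x1000 | 20
const TEMPORARY_NODE_FAILURE: u16 = NODE | 2;  // 0x2000 | 2

// UPDATE-flagged failures must carry a `channel_update`; when none exists,
// the code above downgrades to temporary_node_failure.
fn finalize_code(code: u16, have_channel_update: bool) -> u16 {
    if code & UPDATE == UPDATE && !have_channel_update { TEMPORARY_NODE_FAILURE } else { code }
}

fn main() {
    assert_eq!(finalize_code(CHANNEL_DISABLED, false), TEMPORARY_NODE_FAILURE);
    assert_eq!(finalize_code(UNKNOWN_NEXT_PEER, false), UNKNOWN_NEXT_PEER);
    let _ = (AMOUNT_BELOW_MINIMUM, EXPIRY_TOO_SOON);
}
```
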
 
-               let pending_forward_info = match next_hop {
+       fn construct_pending_htlc_status<'a>(
+               &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
+               allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+       ) -> PendingHTLCStatus {
+               macro_rules! return_err {
+                       ($msg: expr, $err_code: expr, $data: expr) => {
+                               {
+                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                               channel_id: msg.channel_id,
+                                               htlc_id: msg.htlc_id,
+                                               reason: HTLCFailReason::reason($err_code, $data.to_vec())
+                                                       .get_encrypted_failure_packet(&shared_secret, &None),
+                                       }));
+                               }
+                       }
+               }
+               match decoded_hop {
                        onion_utils::Hop::Receive(next_hop_data) => {
                                // OUR PAYMENT!
-                               match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
+                               match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
+                                       msg.amount_msat, msg.cltv_expiry, None, allow_underpay)
+                               {
                                        Ok(info) => {
                                                // Note that we could obviously respond immediately with an update_fulfill_htlc
                                                // message, however that would leak that we are the recipient of this payment, so
@@ -2583,10 +2874,10 @@ where
                                }
                        },
                        onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
-                               let new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+                               debug_assert!(next_packet_pubkey_opt.is_some());
                                let outgoing_packet = msgs::OnionPacket {
                                        version: 0,
-                                       public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret),
+                                       public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
                                        hop_data: new_packet_bytes,
                                        hmac: next_hop_hmac.clone(),
                                };
@@ -2608,150 +2899,10 @@ where
                                        incoming_amt_msat: Some(msg.amount_msat),
                                        outgoing_amt_msat: next_hop_data.amt_to_forward,
                                        outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+                                       skimmed_fee_msat: None,
                                })
                        }
-               };
-
-               if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
-                       // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
-                       // with a short_channel_id of 0. This is important as various things later assume
-                       // short_channel_id is non-0 in any ::Forward.
-                       if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-                               if let Some((err, mut code, chan_update)) = loop {
-                                       let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
-                                       let forwarding_chan_info_opt = match id_option {
-                                               None => { // unknown_next_peer
-                                                       // Note that this is likely a timing oracle for detecting whether an scid is a
-                                                       // phantom or an intercept.
-                                                       if (self.default_configuration.accept_intercept_htlcs &&
-                                                          fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
-                                                          fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
-                                                       {
-                                                               None
-                                                       } else {
-                                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-                                                       }
-                                               },
-                                               Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
-                                       };
-                                       let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
-                                               let per_peer_state = self.per_peer_state.read().unwrap();
-                                               let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-                                               if peer_state_mutex_opt.is_none() {
-                                                       break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-                                               }
-                                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-                                               let peer_state = &mut *peer_state_lock;
-                                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
-                                                       None => {
-                                                               // Channel was removed. The short_to_chan_info and channel_by_id maps
-                                                               // have no consistency guarantees.
-                                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-                                                       },
-                                                       Some(chan) => chan
-                                               };
-                                               if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
-                                                       // Note that the behavior here should be identical to the above block - we
-                                                       // should NOT reveal the existence or non-existence of a private channel if
-                                                       // we don't allow forwards outbound over them.
-                                                       break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
-                                               }
-                                               if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
-                                                       // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
-                                                       // "refuse to forward unless the SCID alias was used", so we pretend
-                                                       // we don't have the channel here.
-                                                       break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
-                                               }
-                                               let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
-
-                                               // Note that we could technically not return an error yet here and just hope
-                                               // that the connection is reestablished or monitor updated by the time we get
-                                               // around to doing the actual forward, but better to fail early if we can and
-                                               // hopefully an attacker trying to path-trace payments cannot make this occur
-                                               // on a small/per-node/per-channel scale.
-                                               if !chan.context.is_live() { // channel_disabled
-                                                       // If the channel_update we're going to return is disabled (i.e. the
-                                                       // peer has been disabled for some time), return `channel_disabled`,
-                                                       // otherwise return `temporary_channel_failure`.
-                                                       if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
-                                                               break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
-                                                       } else {
-                                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
-                                                       }
-                                               }
-                                               if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-                                                       break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
-                                               }
-                                               if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
-                                                       break Some((err, code, chan_update_opt));
-                                               }
-                                               chan_update_opt
-                                       } else {
-                                               if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
-                                                       // We really should set `incorrect_cltv_expiry` here but as we're not
-                                                       // forwarding over a real channel we can't generate a channel_update
-                                                       // for it. Instead we just return a generic temporary_node_failure.
-                                                       break Some((
-                                                               "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
-                                                               0x2000 | 2, None,
-                                                       ));
-                                               }
-                                               None
-                                       };
-
-                                       let cur_height = self.best_block.read().unwrap().height() + 1;
-                                       // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
-                                       // but we want to be robust wrt to counterparty packet sanitization (see
-                                       // HTLC_FAIL_BACK_BUFFER rationale).
-                                       if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-                                               break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
-                                       }
-                                       if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
-                                               break Some(("CLTV expiry is too far in the future", 21, None));
-                                       }
-                                       // If the HTLC expires ~now, don't bother trying to forward it to our
-                                       // counterparty. They should fail it anyway, but we don't want to bother with
-                                       // the round-trips or risk them deciding they definitely want the HTLC and
-                                       // force-closing to ensure they get it if we're offline.
-                                       // We previously had a much more aggressive check here which tried to ensure
-                                       // our counterparty receives an HTLC which has *our* risk threshold met on it,
-                                       // but there is no need to do that, and since we're a bit conservative with our
-                                       // risk threshold it just results in failing to forward payments.
-                                       if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
-                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
-                                       }
-
-                                       break None;
-                               }
-                               {
-                                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
-                                       if let Some(chan_update) = chan_update {
-                                               if code == 0x1000 | 11 || code == 0x1000 | 12 {
-                                                       msg.amount_msat.write(&mut res).expect("Writes cannot fail");
-                                               }
-                                               else if code == 0x1000 | 13 {
-                                                       msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
-                                               }
-                                               else if code == 0x1000 | 20 {
-                                                       // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
-                                                       0u16.write(&mut res).expect("Writes cannot fail");
-                                               }
-                                               (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
-                                               msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
-                                               chan_update.write(&mut res).expect("Writes cannot fail");
-                                       } else if code & 0x1000 == 0x1000 {
-                                               // If we're trying to return an error that requires a `channel_update` but
-                                               // we're forwarding to a phantom or intercept "channel" (i.e. cannot
-                                               // generate an update), just use the generic "temporary_node_failure"
-                                               // instead.
-                                               code = 0x2000 | 2;
-                                       }
-                                       return_err!(err, code, &res.0[..]);
-                               }
-                       }
                }
-
-               pending_forward_info
        }
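
Taken together, the split gives a two-phase pipeline: `decode_update_add_htlc_onion` parses the onion and runs the outbound checks without holding the inbound peer lock, then `construct_pending_htlc_status` runs the receive checks. A minimal sketch of that control flow with stub types (all names hypothetical):

```rust
enum Hop { Receive, Forward }
enum PendingStatus { Forward(Hop), Fail(&'static str) }

// Phase 1: parse the onion and run outbound-channel checks; no inbound
// channel lock is held here, avoiding lock-order inversions.
fn decode_phase(onion_ok: bool, outbound_ok: bool) -> Result<Hop, &'static str> {
    if !onion_ok { return Err("malformed onion"); }
    if !outbound_ok { return Err("no usable outbound channel"); }
    Ok(Hop::Forward)
}

// Phase 2: with the hop decoded, run the receive-side checks while the
// inbound channel's state is available.
fn construct_phase(hop: Hop) -> PendingStatus {
    PendingStatus::Forward(hop)
}

fn main() {
    let status = match decode_phase(true, true) {
        Ok(hop) => construct_phase(hop),
        Err(e) => PendingStatus::Fail(e),
    };
    assert!(matches!(status, PendingStatus::Forward(_)));
}
```
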
 
        /// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
@@ -2798,6 +2949,7 @@ where
 
                self.get_channel_update_for_onion(short_channel_id, chan)
        }
+
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
                log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
@@ -2876,7 +3028,7 @@ where
                                                session_priv: session_priv.clone(),
                                                first_hop_htlc_msat: htlc_msat,
                                                payment_id,
-                                       }, onion_packet, &self.logger);
+                                       }, onion_packet, None, &self.logger);
                                match break_chan_entry!(self, send_res, chan) {
                                        Some(monitor_update) => {
                                                let update_id = monitor_update.update_id;
@@ -3091,7 +3243,7 @@ where
 
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
-       fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+       fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
                &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
        ) -> Result<(), APIError> {
                let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3100,21 +3252,24 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) {
-                       Some(mut chan) => {
+               let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) {
+                       Some(chan) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
                                let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
-                                       .map_err(|e| if let ChannelError::Close(msg) = e {
-                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None)
+                                       .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
+                                               let channel_id = chan.context.channel_id();
+                                               let user_id = chan.context.get_user_id();
+                                               let shutdown_res = chan.context.force_shutdown(false);
+                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None))
                                        } else { unreachable!(); });
                                match funding_res {
-                                       Ok(funding_msg) => (funding_msg, chan),
-                                       Err(_) => {
+                                       Ok((chan, funding_msg)) => (chan, funding_msg),
+                                       Err((chan, err)) => {
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
 
-                                               let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id());
+                                               let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
                                                return Err(APIError::ChannelUnavailable {
                                                        err: "Signer refused to sign the initial commitment transaction".to_owned()
                                                });
@@ -3340,13 +3495,16 @@ where
        /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
        ///
        /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
-       /// you from forwarding more than you received.
+       /// you from forwarding more than you received. See
+       /// [`HTLCIntercepted::expected_outbound_amount_msat`] for more on forwarding a different amount
+       /// than expected.
        ///
        /// Errors if the event was not handled in time, in which case the HTLC was automatically failed
        /// backwards.
        ///
        /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
        /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
+       /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
        // TODO: when we move to deciding the best outbound channel at forward time, only take
        // `next_node_id` and not `next_hop_channel_id`
        pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
@@ -3368,7 +3526,8 @@ where
                                        chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
                                },
                                None => return Err(APIError::ChannelUnavailable {
-                                       err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
+                                       err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+                                               log_bytes!(*next_hop_channel_id), next_node_id)
                                })
                        }
                };
@@ -3384,7 +3543,10 @@ where
                        },
                        _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
                };
+               let skimmed_fee_msat =
+                       payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
                let pending_htlc_info = PendingHTLCInfo {
+                       skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
                        outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
                };
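
The skim recorded for `forward_intercepted_htlc` is simply the shortfall between the amount that arrived and the amount the caller asked to forward. A standalone sketch of that arithmetic:

    // `saturating_sub` means forwarding the full amount, or more than was
    // received (which LDK permits), records no skim instead of underflowing.
    fn skimmed_fee(outgoing_amt_msat: u64, amt_to_forward_msat: u64) -> Option<u64> {
        match outgoing_amt_msat.saturating_sub(amt_to_forward_msat) {
            0 => None,
            fee => Some(fee),
        }
    }

    fn main() {
        assert_eq!(skimmed_fee(1_000_000, 999_000), Some(1_000)); // 1 sat skimmed
        assert_eq!(skimmed_fee(1_000_000, 1_000_000), None);      // forwarded in full
        assert_eq!(skimmed_fee(1_000_000, 1_100_000), None);      // over-forwarded
    }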
 
@@ -3454,7 +3616,7 @@ where
                                                                                prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                                forward_info: PendingHTLCInfo {
                                                                                        routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
-                                                                                       outgoing_cltv_value, incoming_amt_msat: _
+                                                                                       outgoing_cltv_value, ..
                                                                                }
                                                                        }) => {
                                                                                macro_rules! failure_handler {
@@ -3516,7 +3678,10 @@ where
                                                                                                };
                                                                                                match next_hop {
                                                                                                        onion_utils::Hop::Receive(hop_data) => {
-                                                                                                               match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) {
+                                                                                                               match self.construct_recv_pending_htlc_info(hop_data,
+                                                                                                                       incoming_shared_secret, payment_hash, outgoing_amt_msat,
+                                                                                                                       outgoing_cltv_value, Some(phantom_shared_secret), false)
+                                                                                                               {
                                                                                                                        Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
                                                                                                                        Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                }
@@ -3567,7 +3732,7 @@ where
                                                                                prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
                                                                                forward_info: PendingHTLCInfo {
                                                                                        incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
-                                                                                       routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
+                                                                                       routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
                                                                                },
                                                                        }) => {
                                                                                log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
@@ -3581,7 +3746,7 @@ where
                                                                                });
                                                                                if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
                                                                                        payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                                       onion_packet, &self.logger)
+                                                                                       onion_packet, skimmed_fee_msat, &self.logger)
                                                                                {
                                                                                        if let ChannelError::Ignore(msg) = e {
                                                                                                log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
@@ -3625,7 +3790,8 @@ where
                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                forward_info: PendingHTLCInfo {
-                                                                       routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, ..
+                                                                       routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
+                                                                       skimmed_fee_msat, ..
                                                                }
                                                        }) => {
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
@@ -3666,6 +3832,7 @@ where
                                                                        total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
                                                                        cltv_expiry,
                                                                        onion_payload,
+                                                                       counterparty_skimmed_fee_msat: skimmed_fee_msat,
                                                                };
 
                                                                let mut committed_to_claimable = false;
@@ -3762,11 +3929,14 @@ where
                                                                                        htlcs.push(claimable_htlc);
                                                                                        let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
                                                                                        htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
+                                                                                       let counterparty_skimmed_fee_msat = htlcs.iter()
+                                                                                               .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
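
Each MPP part may carry its own counterparty skim, and the `PaymentClaimable` event pushed just below reports the sum across parts. The aggregation in isolation:

    // Parts without a skim contribute zero to the reported total.
    struct Part { counterparty_skimmed_fee_msat: Option<u64> }

    fn total_skimmed_msat(parts: &[Part]) -> u64 {
        parts.iter().map(|p| p.counterparty_skimmed_fee_msat.unwrap_or(0)).sum()
    }
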
                                                                                        new_events.push_back((events::Event::PaymentClaimable {
                                                                                                receiver_node_id: Some(receiver_node_id),
                                                                                                payment_hash,
                                                                                                purpose: $purpose,
                                                                                                amount_msat,
+                                                                                               counterparty_skimmed_fee_msat,
                                                                                                via_channel_id: Some(prev_channel_id),
                                                                                                via_user_channel_id: Some(prev_user_channel_id),
                                                                                                claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
@@ -4764,16 +4934,17 @@ where
        fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-               let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
+               let peers_without_funded_channels =
+                       self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
-               match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
+               let is_only_peer_channel = peer_state.total_channel_count() == 1;
+               match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) {
                        hash_map::Entry::Occupied(mut channel) => {
-                               if !channel.get().inbound_is_awaiting_accept() {
+                               if !channel.get().is_awaiting_accept() {
                                        return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
                                }
                                if accept_0conf {
@@ -4832,7 +5003,7 @@ where
                                let peer = peer_mtx.lock().unwrap();
                                if !maybe_count_peer(&*peer) { continue; }
                                let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
-                               if num_unfunded_channels == peer.channel_by_id.len() {
+                               if num_unfunded_channels == peer.total_channel_count() {
                                        peers_without_funded_channels += 1;
                                }
                        }
@@ -4845,12 +5016,19 @@ where
        ) -> usize {
                let mut num_unfunded_channels = 0;
                for (_, chan) in peer.channel_by_id.iter() {
+                       // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but only
+                       // those which have not yet had any confirmations on-chain.
                        if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
                                chan.context.get_funding_tx_confirmations(best_block_height) == 0
                        {
                                num_unfunded_channels += 1;
                        }
                }
+               for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
+                       if chan.context.minimum_depth().unwrap_or(1) != 0 {
+                               num_unfunded_channels += 1;
+                       }
+               }
                num_unfunded_channels
        }
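
The count now spans two buckets: funded inbound channels still awaiting their first confirmation, plus every pre-funding inbound V1 channel. A simplified sketch (the struct and field names are stand-ins):

    struct Ctx { is_outbound: bool, minimum_depth: Option<u32>, funding_confs: u32 }

    // Zero-conf channels (minimum_depth == Some(0)) are exempt in both buckets.
    fn unfunded_channel_count(funded: &[Ctx], inbound_v1: &[Ctx]) -> usize {
        funded.iter()
            .filter(|c| !c.is_outbound && c.minimum_depth.unwrap_or(1) != 0
                && c.funding_confs == 0)
            .count()
        + inbound_v1.iter()
            .filter(|c| c.minimum_depth.unwrap_or(1) != 0)
            .count()
    }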
 
@@ -4871,7 +5049,8 @@ where
                // Get the number of peers with channels, but without funded ones. We don't care too much
                // about peers that never open a channel, so we filter by peers that have at least one
                // channel, and then limit the number of those with unfunded channels.
-               let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
+               let channeled_peers_without_funding =
+                       self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
 
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -4885,7 +5064,7 @@ where
                // If this peer already has some channels, a new channel won't increase our number of peers
                // with unfunded channels, so as long as we aren't over the maximum number of unfunded
                // channels per-peer we can accept channels from a peer with existing ones.
-               if peer_state.channel_by_id.is_empty() &&
+               if peer_state.total_channel_count() == 0 &&
                        channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
                        !self.default_configuration.manually_accept_inbound_channels
                {
@@ -4901,7 +5080,7 @@ where
                                msg.temporary_channel_id.clone()));
                }
 
-               let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
+               let mut channel = match InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
                        &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
                {
@@ -4911,33 +5090,31 @@ where
                        },
                        Ok(res) => res
                };
-               match peer_state.channel_by_id.entry(channel.context.channel_id()) {
-                       hash_map::Entry::Occupied(_) => {
-                               self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
-                       },
-                       hash_map::Entry::Vacant(entry) => {
-                               if !self.default_configuration.manually_accept_inbound_channels {
-                                       if channel.context.get_channel_type().requires_zero_conf() {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
-                                       }
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg: channel.accept_inbound_channel(user_channel_id),
-                                       });
-                               } else {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                       pending_events.push_back((events::Event::OpenChannelRequest {
-                                               temporary_channel_id: msg.temporary_channel_id.clone(),
-                                               counterparty_node_id: counterparty_node_id.clone(),
-                                               funding_satoshis: msg.funding_satoshis,
-                                               push_msat: msg.push_msat,
-                                               channel_type: channel.context.get_channel_type().clone(),
-                                       }, None));
+               let channel_id = channel.context.channel_id();
+               let channel_exists = peer_state.has_channel(&channel_id);
+               if channel_exists {
+                       self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
+               } else {
+                       if !self.default_configuration.manually_accept_inbound_channels {
+                               if channel.context.get_channel_type().requires_zero_conf() {
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
                                }
-
-                               entry.insert(channel);
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+                                       node_id: counterparty_node_id.clone(),
+                                       msg: channel.accept_inbound_channel(user_channel_id),
+                               });
+                       } else {
+                               let mut pending_events = self.pending_events.lock().unwrap();
+                               pending_events.push_back((events::Event::OpenChannelRequest {
+                                       temporary_channel_id: msg.temporary_channel_id.clone(),
+                                       counterparty_node_id: counterparty_node_id.clone(),
+                                       funding_satoshis: msg.funding_satoshis,
+                                       push_msat: msg.push_msat,
+                                       channel_type: channel.context.get_channel_type().clone(),
+                               }, None));
                        }
+                       peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
                }
                Ok(())
        }
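
`has_channel` and `total_channel_count` are presumably thin helpers over the three per-peer maps; a sketch under that assumption:

    use std::collections::HashMap;

    struct PeerChannels<F, O, I> {
        channel_by_id: HashMap<[u8; 32], F>,
        outbound_v1_channel_by_id: HashMap<[u8; 32], O>,
        inbound_v1_channel_by_id: HashMap<[u8; 32], I>,
    }

    impl<F, O, I> PeerChannels<F, O, I> {
        // A (temporary) channel id collides if any of the three maps knows it.
        fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
            self.channel_by_id.contains_key(channel_id)
                || self.outbound_v1_channel_by_id.contains_key(channel_id)
                || self.inbound_v1_channel_by_id.contains_key(channel_id)
        }

        fn total_channel_count(&self) -> usize {
            self.channel_by_id.len()
                + self.outbound_v1_channel_by_id.len()
                + self.inbound_v1_channel_by_id.len()
        }
    }
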
@@ -4952,9 +5129,9 @@ where
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                       match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
-                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
+                                       try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
                                        (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
@@ -4983,12 +5160,24 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let ((funding_msg, monitor), chan) =
-                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
+               let (chan, funding_msg, monitor) =
+                       match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
+                               Some(inbound_chan) => {
+                                       match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
+                                               Ok(res) => res,
+                                               Err((mut inbound_chan, err)) => {
+                                                       // We've already removed this inbound channel from the map in `PeerState`
+                                                       // above, so at this point it is safe to simply clean up any lingering
+                                                       // entries concerning this channel.
+                                                       update_maps_on_chan_removal!(self, &inbound_chan.context);
+                                                       let user_id = inbound_chan.context.get_user_id();
+                                                       let shutdown_res = inbound_chan.context.force_shutdown(false);
+                                                       return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
+                                                               msg.temporary_channel_id, user_id, shutdown_res, None));
+                                               },
+                                       }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+                               None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        };
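
The new `funding_created` signature threads ownership of the channel through both arms of the `Result`: the caller has already removed it from `inbound_v1_channel_by_id`, and still needs it back on failure to run shutdown and map cleanup. The shape, reduced to a sketch:

    struct Chan;
    enum ChanError { Close(String) }

    // On success the channel moves on (here, into `channel_by_id`); on failure
    // it comes back so the caller can force-shutdown and clean up.
    fn funding_step(chan: Chan, signer_ok: bool) -> Result<(Chan, &'static str), (Chan, ChanError)> {
        if signer_ok {
            Ok((chan, "funding_signed"))
        } else {
            Err((chan, ChanError::Close("Signer refused to sign".to_owned())))
        }
    }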
 
                match peer_state.channel_by_id.entry(funding_msg.channel_id) {
@@ -5212,7 +5401,7 @@ where
                                        msg: update
                                });
                        }
-                       self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
+                       self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
                }
                Ok(())
        }
@@ -5227,7 +5416,7 @@ where
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
-               let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+               let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -5239,6 +5428,12 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
 
+                               let pending_forward_info = match decoded_hop_res {
+                                       Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
+                                               self.construct_pending_htlc_status(msg, shared_secret, next_hop,
+                                                       chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+                                       Err(e) => PendingHTLCStatus::Fail(e)
+                               };
                                let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
                                        // If the update_add is completely bogus, the call will Err and we will close,
                                        // but if we've sent a shutdown and they haven't acknowledged it yet, we just
@@ -5686,7 +5881,7 @@ where
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
                                                                        let mut chan = remove_channel!(self, chan_entry);
-                                                                       failed_channels.push(chan.force_shutdown(false));
+                                                                       failed_channels.push(chan.context.force_shutdown(false));
                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                        msg: update
@@ -5697,7 +5892,7 @@ where
                                                                        } else {
                                                                                ClosureReason::CommitmentTxConfirmed
                                                                        };
-                                                                       self.issue_channel_close_events(&chan, reason);
+                                                                       self.issue_channel_close_events(&chan.context, reason);
                                                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                node_id: chan.context.get_counterparty_node_id(),
                                                                                action: msgs::ErrorAction::SendErrorMessage {
@@ -5822,11 +6017,11 @@ where
                                                                        });
                                                                }
 
-                                                               self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
+                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
 
                                                                log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
-                                                               update_maps_on_chan_removal!(self, chan);
+                                                               update_maps_on_chan_removal!(self, &chan.context);
                                                                false
                                                        } else { true }
                                                },
@@ -6530,17 +6725,17 @@ where
                                                        }
                                                }
                                        } else if let Err(reason) = res {
-                                               update_maps_on_chan_removal!(self, channel);
+                                               update_maps_on_chan_removal!(self, &channel.context);
                                                // It looks like our counterparty went on-chain or funding transaction was
                                                // reorged out of the main chain. Close the channel.
-                                               failed_channels.push(channel.force_shutdown(true));
+                                               failed_channels.push(channel.context.force_shutdown(true));
                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
                                                }
                                                let reason_message = format!("{}", reason);
-                                               self.issue_channel_close_events(channel, reason);
+                                               self.issue_channel_close_events(&channel.context, reason);
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: channel.context.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
@@ -6790,12 +6985,22 @@ where
                                peer_state.channel_by_id.retain(|_, chan| {
                                        chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                        if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, chan);
-                                               self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
+                                               update_maps_on_chan_removal!(self, &chan.context);
+                                               self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
                                                return false;
                                        }
                                        true
                                });
+                               peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
+                                       update_maps_on_chan_removal!(self, &chan.context);
+                                       self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+                                       false
+                               });
+                               peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
+                                       update_maps_on_chan_removal!(self, &chan.context);
+                                       self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+                                       false
+                               });
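
Unfunded V1 channels are dropped unconditionally on disconnect, using `retain` with a closure that always returns `false` as a drain-with-cleanup. The pattern in isolation:

    use std::collections::HashMap;

    // Runs `cleanup` on every entry, then empties the map.
    fn drain_with_cleanup<K, V>(map: &mut HashMap<K, V>, mut cleanup: impl FnMut(&K, &mut V)) {
        map.retain(|k, v| { cleanup(k, v); false });
    }
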
                                pending_msg_events.retain(|msg| {
                                        match msg {
                                                // V1 Channel Establishment
@@ -6877,6 +7082,8 @@ where
                                        }
                                        e.insert(Mutex::new(PeerState {
                                                channel_by_id: HashMap::new(),
+                                               outbound_v1_channel_by_id: HashMap::new(),
+                                               inbound_v1_channel_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
                                                pending_msg_events: Vec::new(),
                                                monitor_update_blocked_actions: BTreeMap::new(),
@@ -6952,7 +7159,9 @@ where
                                if peer_state_mutex_opt.is_none() { return; }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               peer_state.channel_by_id.keys().cloned().collect()
+                               peer_state.channel_by_id.keys().cloned()
+                                       .chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
+                                       .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
                        };
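
With channels split across three maps, enumerating a peer's channel ids becomes a chained iteration; a standalone equivalent:

    use std::collections::HashMap;

    fn all_channel_ids<F, O, I>(
        funded: &HashMap<[u8; 32], F>,
        outbound_v1: &HashMap<[u8; 32], O>,
        inbound_v1: &HashMap<[u8; 32], I>,
    ) -> Vec<[u8; 32]> {
        // Keys are copied out so no borrows outlive the peer-state lock.
        funded.keys().cloned()
            .chain(outbound_v1.keys().cloned())
            .chain(inbound_v1.keys().cloned())
            .collect()
    }
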
                        for channel_id in channel_ids {
                                // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
@@ -6966,7 +7175,7 @@ where
                                if peer_state_mutex_opt.is_none() { return; }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
+                               if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
                                        if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
@@ -7264,6 +7473,7 @@ impl_writeable_tlv_based!(PendingHTLCInfo, {
        (6, outgoing_amt_msat, required),
        (8, outgoing_cltv_value, required),
        (9, incoming_amt_msat, option),
+       (10, skimmed_fee_msat, option),
 });
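
The new `(10, skimmed_fee_msat, option)` record follows the TLV convention LDK uses for optional fields: when the value is `None` the record is simply omitted from the stream, so data written by older versions still reads back cleanly. A simplified illustration (fixed-width type/length rather than the real variable-length encoding):

    // Writes nothing at all for `None`, one type-length-value record for `Some`.
    fn write_optional_u64_tlv(type_num: u8, value: Option<u64>, out: &mut Vec<u8>) {
        if let Some(v) = value {
            out.push(type_num);
            out.push(8); // length of a u64 value
            out.extend_from_slice(&v.to_be_bytes());
        }
    }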
 
 
@@ -7362,6 +7572,7 @@ impl Writeable for ClaimableHTLC {
                        (5, self.total_value_received, option),
                        (6, self.cltv_expiry, required),
                        (8, keysend_preimage, option),
+                       (10, self.counterparty_skimmed_fee_msat, option),
                });
                Ok(())
        }
@@ -7369,24 +7580,19 @@ impl Writeable for ClaimableHTLC {
 
 impl Readable for ClaimableHTLC {
        fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
-               let mut prev_hop = crate::util::ser::RequiredWrapper(None);
-               let mut value = 0;
-               let mut sender_intended_value = None;
-               let mut payment_data: Option<msgs::FinalOnionHopData> = None;
-               let mut cltv_expiry = 0;
-               let mut total_value_received = None;
-               let mut total_msat = None;
-               let mut keysend_preimage: Option<PaymentPreimage> = None;
-               read_tlv_fields!(reader, {
+               _init_and_read_tlv_fields!(reader, {
                        (0, prev_hop, required),
                        (1, total_msat, option),
-                       (2, value, required),
+                       (2, value_ser, required),
                        (3, sender_intended_value, option),
-                       (4, payment_data, option),
+                       (4, payment_data_opt, option),
                        (5, total_value_received, option),
                        (6, cltv_expiry, required),
-                       (8, keysend_preimage, option)
+                       (8, keysend_preimage, option),
+                       (10, counterparty_skimmed_fee_msat, option),
                });
+               let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
+               let value = value_ser.0.unwrap();
                let onion_payload = match keysend_preimage {
                        Some(p) => {
                                if payment_data.is_some() {
@@ -7415,7 +7621,8 @@ impl Readable for ClaimableHTLC {
                        total_value_received,
                        total_msat: total_msat.unwrap(),
                        onion_payload,
-                       cltv_expiry,
+                       cltv_expiry: cltv_expiry.0.unwrap(),
+                       counterparty_skimmed_fee_msat,
                })
        }
 }
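
The `.0.unwrap()` calls above are safe because `_init_and_read_tlv_fields!` hands back `required` fields in `RequiredWrapper` (visible in the removed lines), whose inner `Option` is guaranteed populated once the read succeeds; roughly:

    struct RequiredWrapper<T>(Option<T>);

    // If a required field were missing, read_tlv_fields would have errored
    // before we got here, so the unwrap cannot panic.
    fn unwrap_required<T>(w: RequiredWrapper<T>) -> T {
        w.0.expect("required TLV field set during a successful read")
    }
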
@@ -7963,7 +8170,7 @@ where
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
-                                       let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+                                       let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id, funding_txo, update
@@ -8021,7 +8228,7 @@ where
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
                                // was in-progress, we never broadcasted the funding transaction and can still
                                // safely discard the channel.
-                               let _ = channel.force_shutdown(false);
+                               let _ = channel.context.force_shutdown(false);
                                channel_closures.push_back((events::Event::ChannelClosed {
                                        channel_id: channel.context.channel_id(),
                                        user_channel_id: channel.context.get_user_id(),
@@ -8080,6 +8287,8 @@ where
                        let peer_pubkey = Readable::read(reader)?;
                        let peer_state = PeerState {
                                channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
+                               outbound_v1_channel_by_id: HashMap::new(),
+                               inbound_v1_channel_by_id: HashMap::new(),
                                latest_features: Readable::read(reader)?,
                                pending_msg_events: Vec::new(),
                                monitor_update_blocked_actions: BTreeMap::new(),