Merge pull request #2760 from TheBlueMatt/2023-11-chan-close-loop
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c5fa1d21c71087cbe808e4380b793636b2c91620..7c6ee1e429c1d1a325759d22c0a73207fa50d730 100644
@@ -109,50 +109,79 @@ use crate::ln::script::ShutdownScript;
 // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
 // our payment, which we can use to decode errors or inform the user that the payment was sent.
 
-/// Routing info for an inbound HTLC onion.
+/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub enum PendingHTLCRouting {
-       /// A forwarded HTLC.
+       /// An HTLC which should be forwarded on to another node.
        Forward {
-               /// BOLT 4 onion packet.
+               /// The onion which should be included in the forwarded HTLC, telling the next hop what to
+               /// do with the HTLC.
                onion_packet: msgs::OnionPacket,
-               /// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
-               /// generated using `get_fake_scid` from the scid_utils::fake_scid module.
+               /// The short channel ID of the channel which we were instructed to forward this HTLC to.
+               ///
+               /// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
+               /// to the receiving node, such as one returned from
+               /// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
                /// Set if this HTLC is being forwarded within a blinded path.
                blinded: Option<BlindedForward>,
        },
-       /// An HTLC paid to an invoice (supposedly) generated by us.
-       /// At this point, we have not checked that the invoice being paid was actually generated by us,
-       /// but rather it's claiming to pay an invoice of ours.
+       /// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
+       ///
+       /// Note that at this point, we have not checked that the invoice being paid was actually
+       /// generated by us, but rather it's claiming to pay an invoice of ours.
        Receive {
-               /// Payment secret and total msat received.
+               /// Information about the amount the sender intended to pay and (potential) proof that this
+               /// is a payment for an invoice we generated. This proof of payment is also used for
+               /// linking MPP parts of a larger payment.
                payment_data: msgs::FinalOnionHopData,
-               /// See [`RecipientOnionFields::payment_metadata`] for more info.
+               /// Additional data which we (allegedly) instructed the sender to include in the onion.
+               ///
+               /// For HTLCs received by LDK, this will ultimately be exposed in
+               /// [`Event::PaymentClaimable::onion_fields`] as
+               /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
                /// CLTV expiry of the received HTLC.
+               ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                incoming_cltv_expiry: u32,
-               /// Shared secret derived using a phantom node secret key. If this field is Some, the
-               /// payment was sent to a phantom node (one hop beyond the current node), but can be
-               /// settled by this node.
+               /// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
+               /// provide the onion shared secret used to decrypt the next level of forwarding
+               /// instructions.
                phantom_shared_secret: Option<[u8; 32]>,
-               /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+               /// Custom TLVs which were set by the sender.
+               ///
+               /// For HTLCs received by LDK, this will ultimately be exposed in
+               /// [`Event::PaymentClaimable::onion_fields`] as
+               /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
        },
-       /// Incoming keysend (sender provided the preimage in a TLV).
+       /// The onion indicates that this is a payment to us, with the preimage needed to claim it
+       /// included, and is unrelated to any invoice we'd previously generated (aka a "keysend" or
+       /// "spontaneous" payment).
        ReceiveKeysend {
-               /// This was added in 0.0.116 and will break deserialization on downgrades.
+               /// Information about the amount the sender intended to pay and possibly a token to
+               /// associate MPP parts of a larger payment.
+               ///
+               /// This will only be filled in if receiving MPP keysend payments is enabled, and its
+               /// presence will cause deserialization to fail on versions of LDK prior to 0.0.116.
                payment_data: Option<msgs::FinalOnionHopData>,
                /// Preimage for this onion payment. This preimage is provided by the sender and will be
                /// used to settle the spontaneous payment.
                payment_preimage: PaymentPreimage,
-               /// See [`RecipientOnionFields::payment_metadata`] for more info.
+               /// Additional data which we (allegedly) instructed the sender to include in the onion.
+               ///
+               /// For HTLCs received by LDK, this will ultimately bubble back up as
+               /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
                /// CLTV expiry of the received HTLC.
+               ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                incoming_cltv_expiry: u32,
-               /// See [`RecipientOnionFields::custom_tlvs`] for more info.
+               /// Custom TLVs which were set by the sender.
+               ///
+               /// For HTLCs received by LDK, these will ultimately bubble back up as
+               /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
        },
 }
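
To make the three routing outcomes concrete, here is a minimal, self-contained Rust sketch (hypothetical names, not LDK's actual types; the real enum also carries onion packets, metadata, TLVs, and blinded-path data) of how a node might dispatch on this decision:

```rust
// Standalone mirror of the shape of `PendingHTLCRouting`.
enum HtlcRouting {
    Forward { short_channel_id: u64 },
    Receive { payment_secret: [u8; 32] },
    ReceiveKeysend { payment_preimage: [u8; 32] },
}

fn handle_routing(routing: &HtlcRouting) {
    match routing {
        // Forward: queue the HTLC toward the indicated (possibly fake) SCID.
        HtlcRouting::Forward { short_channel_id } => {
            println!("forward over SCID {}", short_channel_id);
        }
        // Receive: the claimed payment secret still needs checking against
        // our invoice store before we surface a claimable event.
        HtlcRouting::Receive { payment_secret } => {
            println!("candidate invoice payment, secret {:02x?}", &payment_secret[..4]);
        }
        // ReceiveKeysend: the sender supplied the preimage itself, so no
        // invoice lookup is needed; the preimage alone lets us settle.
        HtlcRouting::ReceiveKeysend { payment_preimage } => {
            println!("spontaneous payment, preimage {:02x?}", &payment_preimage[..4]);
        }
    }
}

fn main() {
    handle_routing(&HtlcRouting::Forward { short_channel_id: 42 });
}
```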
@@ -179,25 +208,47 @@ impl PendingHTLCRouting {
        }
 }
 
-/// Full details of an incoming HTLC, including routing info.
+/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
+/// should go next.
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub struct PendingHTLCInfo {
        /// Further routing details based on whether the HTLC is being forwarded or received.
        pub routing: PendingHTLCRouting,
-       /// Shared secret from the previous hop.
-       /// Used encrypt failure packets in the event that the HTLC needs to be failed backwards.
+       /// The onion shared secret we built with the sender, used to decrypt the onion.
+       ///
+       /// This is later used to encrypt failure packets in the event that the HTLC is failed.
        pub incoming_shared_secret: [u8; 32],
        /// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
        pub payment_hash: PaymentHash,
-       /// Amount offered by this HTLC.
-       pub incoming_amt_msat: Option<u64>, // Added in 0.0.113
-       /// Sender intended amount to forward or receive (actual amount received
-       /// may overshoot this in either case)
+       /// Amount received in the incoming HTLC.
+       ///
+       /// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
+       /// versions.
+       pub incoming_amt_msat: Option<u64>,
+       /// The amount the sender indicated should be forwarded on to the next hop or, for received
+       /// payments, the amount the sender intended for us to receive.
+       ///
+       /// If the received amount is less than this for received payments, an intermediary hop has
+       /// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
+       /// it along another path).
+       ///
+       /// Because nodes can take less than their required fees, and because senders may wish to
+       /// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
+       /// received payments. In such cases, recipients must handle this HTLC as if they had received
+       /// [`Self::outgoing_amt_msat`].
        pub outgoing_amt_msat: u64,
-       /// Outgoing timelock expiration blockheight.
+       /// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
+       /// should have been set on the received HTLC for received payments).
        pub outgoing_cltv_value: u32,
-       /// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
-       /// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+       /// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
+       ///
+       /// If we are forwarding this HTLC, this is the fee we are taking before forwarding it
+       /// onwards.
+       ///
+       /// If this is a received payment, this is the fee that our counterparty took.
+       ///
+       /// This is used to allow LSPs to take fees as a part of payments, without the sender having to
+       /// shoulder them.
        pub skimmed_fee_msat: Option<u64>,
 }
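
The amount semantics documented above boil down to a small check on receive; a minimal sketch (illustrative only, not LDK code):

```rust
// Sketch of the underpayment check implied by the docs above: delivering less
// than the sender intended means an intermediary skimmed our funds, while
// delivering more is acceptable (the recipient claims the intended amount).
fn check_received_amount(incoming_amt_msat: u64, sender_intended_msat: u64) -> Result<u64, &'static str> {
    if incoming_amt_msat < sender_intended_msat {
        // Fail the HTLC backwards; the sender should retry along another path.
        return Err("intermediary delivered less than the sender intended");
    }
    // Handle the HTLC as if exactly the sender-intended amount was received.
    Ok(sender_intended_msat)
}

fn main() {
    assert_eq!(check_received_amount(1_000, 1_000), Ok(1_000));
    assert_eq!(check_received_amount(1_100, 1_000), Ok(1_000)); // overshoot is fine
    assert!(check_received_amount(900, 1_000).is_err()); // underpayment fails
}
```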
 
@@ -2663,9 +2714,10 @@ where
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-               let shutdown_result;
-               loop {
+               let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
+               let mut shutdown_result = None;
+
+               {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -2679,11 +2731,9 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let their_features = &peer_state.latest_features;
-                                               let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs) =
                                                        chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                                failed_htlcs = htlcs;
-                                               shutdown_result = local_shutdown_result;
-                                               debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
                                                // We can send the `shutdown` message before updating the `ChannelMonitor`
                                                // here as we don't need the monitor update to complete until we send a
@@ -2700,30 +2750,20 @@ where
                                                if let Some(monitor_update) = monitor_update_opt.take() {
                                                        handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan);
-                                                       break;
-                                               }
-
-                                               if chan.is_shutdown() {
-                                                       if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: channel_update
-                                                                       });
-                                                               }
-                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-                                                       }
                                                }
-                                               break;
+                                       } else {
+                                               self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
+                                               let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
-                                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
-                                       // it does not exist for this peer. Either way, we can attempt to force-close it.
-                                       //
-                                       // An appropriate error will be returned for non-existence of the channel if that's the case.
-                                       mem::drop(peer_state_lock);
-                                       mem::drop(per_peer_state);
-                                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+                                       return Err(APIError::ChannelUnavailable {
+                                               err: format!(
+                                                       "Channel with id {} not found for the passed counterparty node_id {}",
+                                                       channel_id, counterparty_node_id,
+                                               )
+                                       });
                                },
                        }
                }
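
The control-flow change above swaps a `loop { ... break; }` used only for early exit for a plain lock scope whose outcomes are recorded in locals and acted on after the guard is released. A generic sketch of that pattern, with illustrative types:

```rust
// Generic sketch of the refactor: do the locked work in a block expression,
// return the collected outcomes, and only then (lock released) act on them.
use std::sync::Mutex;

struct PeerState { htlcs: Vec<u64>, force_shutdown: bool }

fn close_channel(state: &Mutex<PeerState>) {
    let (failed_htlcs, shutdown_result) = {
        // Lock scope, mirroring `per_peer_state`/`peer_state` above.
        let mut st = state.lock().unwrap();
        let htlcs: Vec<u64> = st.htlcs.drain(..).collect();
        let shutdown = if st.force_shutdown { Some("force-shutdown") } else { None };
        (htlcs, shutdown)
    }; // guard dropped here, before any follow-up work

    for htlc in failed_htlcs {
        println!("failing HTLC {} backwards", htlc);
    }
    if let Some(res) = shutdown_result {
        println!("finishing closure: {}", res);
    }
}

fn main() {
    let state = Mutex::new(PeerState { htlcs: vec![1, 2], force_shutdown: true });
    close_channel(&state);
}
```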
@@ -2808,7 +2848,10 @@ where
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
 
-               log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+               let logger = WithContext::from(
+                       &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+               );
+               log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
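
`WithContext::from` above decorates the base logger with the counterparty and channel IDs so every line in the closure path is attributed. A simplified, standalone version of that decorator idea (not LDK's actual `Logger` trait):

```rust
// Standalone take on the context-logger decorator: wrap a base logger with
// optional peer/channel IDs so callers don't repeat them on every line.
struct BaseLogger;

impl BaseLogger {
    fn log(&self, line: &str) {
        println!("{}", line);
    }
}

struct WithContext<'a> {
    inner: &'a BaseLogger,
    peer_id: Option<&'a str>,
    channel_id: Option<&'a str>,
}

impl<'a> WithContext<'a> {
    fn log(&self, line: &str) {
        let peer = self.peer_id.unwrap_or("-");
        let chan = self.channel_id.unwrap_or("-");
        self.inner.log(&format!("[peer:{}][chan:{}] {}", peer, chan, line));
    }
}

fn main() {
    let base = BaseLogger;
    let logger = WithContext { inner: &base, peer_id: Some("02abc"), channel_id: Some("f00d") };
    logger.log("Finishing closure of channel with 2 HTLCs to fail");
}
```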
@@ -2967,7 +3010,7 @@ where
        }
 
        fn decode_update_add_htlc_onion(
-               &self, msg: &msgs::UpdateAddHTLC
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
        ) -> Result<
                (onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
        > {
@@ -2986,7 +3029,7 @@ where
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
                                        log_info!(
-                                               WithContext::from(&self.logger, None, Some(msg.channel_id)),
+                                               WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
                                                "Failed to accept/forward incoming HTLC: {}", $msg
                                        );
                                        let (err_code, err_data) = if is_blinded {
@@ -3133,13 +3176,15 @@ where
        }
 
        fn construct_pending_htlc_status<'a>(
-               &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
-               allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
+               decoded_hop: onion_utils::Hop, allow_underpay: bool,
+               next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
        ) -> PendingHTLCStatus {
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
-                                       log_info!(WithContext::from(&self.logger, None, Some(msg.channel_id)), "Failed to accept/forward incoming HTLC: {}", $msg);
+                                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+                                       log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
                                        return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
@@ -3276,23 +3321,33 @@ where
                } = args;
                // The top-level caller should hold the total_consistency_lock read lock.
                debug_assert!(self.total_consistency_lock.try_write().is_err());
-               log_trace!(WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None),
-                       "Attempting to send payment with payment hash {} along path with next hop {}",
-                       payment_hash, path.hops.first().unwrap().short_channel_id);
                let prng_seed = self.entropy_source.get_secure_random_bytes();
                let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
 
                let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
                        &self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
                        payment_hash, keysend_preimage, prng_seed
-               )?;
+               ).map_err(|e| {
+                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                       log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
+                       e
+               })?;
 
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
-                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
+                               None => {
+                                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                                       log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
+                                       return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
+                               },
                                Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        };
 
+                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+                       log_trace!(logger,
+                               "Attempting to send payment with payment hash {} along path with next hop {}",
+                               payment_hash, path.hops.first().unwrap().short_channel_id);
+
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
                                .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
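
Both new log sites in this hunk use the same `.map_err(log and re-raise)` shape: log with context exactly where the failure happens, then propagate the original error unchanged via `?`. A generic sketch (function names illustrative, echoing but not reproducing LDK's):

```rust
// Log-and-forward: the closure's only job is the side effect; the error value
// itself is returned untouched so callers see the same Result as before.
fn create_payment_onion(path_ok: bool) -> Result<&'static str, String> {
    if path_ok { Ok("onion-packet") } else { Err("path too long".to_owned()) }
}

fn send_payment_along_path(path_ok: bool) -> Result<(), String> {
    let onion = create_payment_onion(path_ok).map_err(|e| {
        eprintln!("Failed to build an onion for path: {}", e);
        e
    })?;
    println!("sending {}", onion);
    Ok(())
}

fn main() {
    send_payment_along_path(true).unwrap();
    assert!(send_payment_along_path(false).is_err());
}
```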
@@ -3664,7 +3719,7 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
-                       Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
+                       Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
                                let logger = WithChannelContext::from(&self.logger, &chan.context);
@@ -3677,7 +3732,7 @@ where
                                                (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
                                        } else { unreachable!(); });
                                match funding_res {
-                                       Ok((chan, funding_msg)) => (chan, funding_msg),
+                                       Ok(funding_msg) => (chan, funding_msg),
                                        Err((chan, err)) => {
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
@@ -3717,7 +3772,7 @@ where
                                if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
-                               e.insert(ChannelPhase::Funded(chan));
+                               e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
                }
                Ok(())
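
Together with the `funding_signed` handler later in this diff, the change above means an outbound channel now stays in `UnfundedOutboundV1` after `funding_created` is sent, and is only promoted to `Funded` once the counterparty's `funding_signed` arrives. A toy phase machine capturing that ordering (names illustrative, not LDK's):

```rust
// Promotion happens on the counterparty's signature, not on our own send.
enum Phase {
    UnfundedOutboundV1,
    Funded,
}

fn on_funding_created_sent(phase: Phase) -> Phase {
    // Previously this promoted straight to Funded; now it stays unfunded.
    match phase {
        Phase::UnfundedOutboundV1 => Phase::UnfundedOutboundV1,
        other => other,
    }
}

fn on_funding_signed(phase: Phase) -> Phase {
    match phase {
        // Only the counterparty's `funding_signed` completes the promotion.
        Phase::UnfundedOutboundV1 => Phase::Funded,
        other => other,
    }
}

fn main() {
    let phase = on_funding_created_sent(Phase::UnfundedOutboundV1);
    let phase = on_funding_signed(phase);
    assert!(matches!(phase, Phase::Funded));
}
```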
@@ -4044,7 +4099,8 @@ where
                                None => {
                                        let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
                                                next_hop_channel_id, next_node_id);
-                                       log_error!(self.logger, "{} when attempting to forward intercepted HTLC", error);
+                                       let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+                                       log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
                                        return Err(APIError::ChannelUnavailable {
                                                err: error
                                        })
@@ -4132,6 +4188,7 @@ where
 
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
                                if short_chan_id != 0 {
+                                       let mut forwarding_counterparty = None;
                                        macro_rules! forwarding_channel_not_found {
                                                () => {
                                                        for forward_info in pending_forwards.drain(..) {
@@ -4145,7 +4202,8 @@ where
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+                                                                                               log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
 
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: prev_short_channel_id,
@@ -4244,6 +4302,7 @@ where
                                                        continue;
                                                }
                                        };
+                                       forwarding_counterparty = Some(counterparty_node_id);
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                        let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() {
@@ -5347,6 +5406,7 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
+                               let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
@@ -5357,7 +5417,8 @@ where
                                        if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                // We got a temporary failure updating monitor, but will claim the
                                                // HTLC when the monitor updating is restored (or on chain).
-                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
+                                               let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
+                                               log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                        } else { errs.push((pk, err)); }
                                }
                        }
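
The new `prev_hop_chan_id` binding above copies the channel ID out *before* `htlc.prev_hop` is moved into `claim_funds_from_hop`, keeping it available for logging in the error arm. The general capture-before-move pattern, sketched with hypothetical types:

```rust
// Copy the small piece of data needed for error reporting before the owning
// value is moved into the call, since it is unavailable afterwards.
#[derive(Debug)]
struct PrevHop { channel_id: [u8; 4] }

fn claim(hop: PrevHop) -> Result<(), String> {
    Err(format!("temporary monitor failure for {:?}", hop.channel_id))
}

fn main() {
    let hop = PrevHop { channel_id: [0xf0, 0x0d, 0x00, 0x01] };
    // Copy out the id first; `hop` is moved on the next line.
    let prev_hop_chan_id = hop.channel_id;
    if let Err(err) = claim(hop) {
        // `hop` is gone, but the copied id still lets us log with context.
        println!("chan {:02x?}: treating as success: {}", prev_hop_chan_id, err);
    }
}
```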
@@ -6238,22 +6299,43 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
-                               match chan_phase_entry.get_mut() {
-                                       ChannelPhase::Funded(ref mut chan) => {
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                               let monitor = try_chan_phase_entry!(self,
-                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger), chan_phase_entry);
-                                               if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
-                                                       Ok(())
-                                               } else {
-                                                       try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
+                       hash_map::Entry::Occupied(chan_phase_entry) => {
+                               if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
+                                       let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
+                                       let logger = WithContext::from(
+                                               &self.logger,
+                                               Some(chan.context.get_counterparty_node_id()),
+                                               Some(chan.context.channel_id())
+                                       );
+                                       let res =
+                                               chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
+                                       match res {
+                                               Ok((chan, monitor)) => {
+                                                       if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
+                                                               // We really should be able to insert here without doing a second
+                                                               // lookup, but sadly rust stdlib doesn't currently allow keeping
+                                                               // the original Entry around with the value removed.
+                                                               let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
+                                                               if let ChannelPhase::Funded(ref mut chan) = &mut chan {
+                                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+                                                               } else { unreachable!(); }
+                                                               Ok(())
+                                                       } else {
+                                                               let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
+                                                       }
+                                               },
+                                               Err((chan, e)) => {
+                                                       debug_assert!(matches!(e, ChannelError::Close(_)),
+                                                               "We don't have a channel anymore, so the error better have expected close");
+                                                       // We've already removed this outbound channel from the map in
+                                                       // `PeerState` above so at this point we just need to clean up any
+                                                       // lingering entries concerning this channel as it is safe to do so.
+                                                       return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
                                                }
-                                       },
-                                       _ => {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
-                                       },
+                                       }
+                               } else {
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
                                }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
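
The comment in the hunk above notes why `funding_signed` must remove the channel and later re-insert it: `std::collections::hash_map::Entry` cannot yield ownership of the value while keeping the slot reserved. A condensed sketch of that remove-then-reinsert dance:

```rust
// The channel is taken out of the map by value so it can be consumed, then
// the promoted value is re-inserted, paying for a second lookup.
use std::collections::HashMap;

enum Phase { Unfunded(String), Funded(String) }

fn promote(mut map: HashMap<u32, Phase>, id: u32) -> HashMap<u32, Phase> {
    if matches!(map.get(&id), Some(Phase::Unfunded(_))) {
        // Take the value out by value so the by-value API can consume it...
        let name = match map.remove(&id) {
            Some(Phase::Unfunded(name)) => name,
            _ => unreachable!(),
        };
        // ...and re-insert the promoted state under the same key.
        map.insert(id, Phase::Funded(name));
    }
    map
}

fn main() {
    let mut map = HashMap::new();
    map.insert(7, Phase::Unfunded("chan".to_owned()));
    let map = promote(map, 7);
    assert!(matches!(map.get(&7), Some(Phase::Funded(_))));
}
```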
@@ -6455,7 +6537,7 @@ where
                // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
                // closing a channel), so any changes are likely to be lost on restart!
 
-               let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
+               let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -6469,8 +6551,10 @@ where
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        let pending_forward_info = match decoded_hop_res {
                                                Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
-                                                       self.construct_pending_htlc_status(msg, shared_secret, next_hop,
-                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+                                                       self.construct_pending_htlc_status(
+                                                               msg, counterparty_node_id, shared_secret, next_hop,
+                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
+                                                       ),
                                                Err(e) => PendingHTLCStatus::Fail(e)
                                        };
                                        let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
@@ -7177,29 +7261,34 @@ where
 
                let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| {
                        let node_id = phase.context().get_counterparty_node_id();
-                       if let ChannelPhase::Funded(chan) = phase {
-                               let msgs = chan.signer_maybe_unblocked(&self.logger);
-                               if let Some(updates) = msgs.commitment_update {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id,
-                                               updates,
-                                       });
-                               }
-                               if let Some(msg) = msgs.funding_signed {
-                                       pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
-                                               node_id,
-                                               msg,
-                                       });
-                               }
-                               if let Some(msg) = msgs.funding_created {
-                                       pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-                                               node_id,
-                                               msg,
-                                       });
+                       match phase {
+                               ChannelPhase::Funded(chan) => {
+                                       let msgs = chan.signer_maybe_unblocked(&self.logger);
+                                       if let Some(updates) = msgs.commitment_update {
+                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id,
+                                                       updates,
+                                               });
+                                       }
+                                       if let Some(msg) = msgs.funding_signed {
+                                               pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+                                                       node_id,
+                                                       msg,
+                                               });
+                                       }
+                                       if let Some(msg) = msgs.channel_ready {
+                                               send_channel_ready!(self, pending_msg_events, chan, msg);
+                                       }
                                }
-                               if let Some(msg) = msgs.channel_ready {
-                                       send_channel_ready!(self, pending_msg_events, chan, msg);
+                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                       if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
+                                               pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+                                                       node_id,
+                                                       msg,
+                                               });
+                                       }
                                }
+                               ChannelPhase::UnfundedInboundV1(_) => {},
                        }
                };
 
@@ -10014,7 +10103,7 @@ where
                                        log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
                                        log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
-                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                               log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                        &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        }
                                        if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
@@ -10410,7 +10499,7 @@ where
                                let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
                                let chan_id = monitor.get_funding_txo().0.to_channel_id();
                                if counterparty_opt.is_none() {
-                                       let logger = WithContext::from(&args.logger, None, Some(chan_id));
+                                       let logger = WithChannelMonitor::from(&args.logger, monitor);
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
                                                        if path.hops.is_empty() {