Log pending in-flight updates when we are missing a monitor
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 81b9d766d7b39a3e91b633e62ea72b94e1eab5bb..27ab6ff0992f2ced1b54f922fd4a1de032b10fab 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -43,14 +43,12 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
-use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
@@ -65,8 +63,9 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
@@ -75,6 +74,13 @@ use crate::util::string::UntrustedString;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
 use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::errors::APIError;
+#[cfg(not(c_bindings))]
+use {
+       crate::routing::router::DefaultRouter,
+       crate::routing::gossip::NetworkGraph,
+       crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
+       crate::sign::KeysManager,
+};
 
 use alloc::collections::{btree_map, BTreeMap};
 
@@ -196,15 +202,16 @@ pub struct BlindedForward {
        /// onion payload if we're the introduction node. Useful for calculating the next hop's
        /// [`msgs::UpdateAddHTLC::blinding_point`].
        pub inbound_blinding_point: PublicKey,
-       // Another field will be added here when we support forwarding as a non-intro node.
+       /// If needed, this determines how this HTLC should be failed backwards, based on whether we are
+       /// the introduction node.
+       pub failure: BlindedFailure,
 }
 
 impl PendingHTLCRouting {
        // Used to override the onion failure code and data if the HTLC is blinded.
        fn blinded_failure(&self) -> Option<BlindedFailure> {
-               // TODO: needs update when we support forwarding blinded HTLCs as non-intro node
                match self {
-                       Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+                       Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
                        Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
                        _ => None,
                }
@@ -299,10 +306,15 @@ pub(super) enum HTLCForwardInfo {
        },
 }
 
-// Used for failing blinded HTLCs backwards correctly.
+/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
+/// which determines the failure message that should be used.
 #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
-enum BlindedFailure {
+pub enum BlindedFailure {
+       /// This HTLC is being failed backwards by the introduction node, and thus should be failed with
+       /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
        FromIntroductionNode,
+       /// This HTLC is being failed backwards by a blinded node within the path, and thus should be
+       /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
        FromBlindedNode,
 }
 
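The new doc comments above spell out which failure message each direction uses. A small self-contained sketch of that mapping (local stand-ins, not the crate's actual failure-path code); per BOLT 4 both directions use the same `invalid_onion_blinding` error code, only the message type differs:

```rust
// Standalone sketch: per BOLT 4, `invalid_onion_blinding` is BADONION|PERM|24.
const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;

#[derive(Clone, Copy, Debug)]
enum BlindedFailure {
    FromIntroductionNode,
    FromBlindedNode,
}

/// Which update message a node sends when failing a blinded HTLC backwards.
fn failure_message_kind(failure: BlindedFailure) -> (&'static str, u16) {
    match failure {
        // The introduction node can still produce a normal (encrypted) failure.
        BlindedFailure::FromIntroductionNode => ("update_fail_htlc", INVALID_ONION_BLINDING),
        // Nodes inside the blinded path must use the malformed variant instead.
        BlindedFailure::FromBlindedNode => ("update_fail_malformed_htlc", INVALID_ONION_BLINDING),
    }
}

fn main() {
    assert_eq!(failure_message_kind(BlindedFailure::FromBlindedNode).1, 0xc018);
}
```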
@@ -2878,6 +2890,7 @@ where
                                reason: shutdown_res.closure_reason,
                                counterparty_node_id: Some(shutdown_res.counterparty_node_id),
                                channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+                               channel_funding_txo: shutdown_res.channel_funding_txo,
                        }, None));
 
                        if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
@@ -3018,8 +3031,9 @@ where
 
                let is_intro_node_forward = match next_hop {
                        onion_utils::Hop::Forward {
-                               // TODO: update this when we support blinded forwarding as non-intro node
-                               next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+                               next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+                                       intro_node_blinding_point: Some(_), ..
+                               }, ..
                        } => true,
                        _ => false,
                };
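The hunk above stops treating every `BlindedForward` payload as an intro-node forward and instead keys off whether the onion payload itself carried a blinding point. A hedged, standalone sketch of that distinction with a toy payload type (not `msgs::InboundOnionPayload`): only the introduction node finds the blinding point inside its onion payload, while later blinded hops receive it in `update_add_htlc` and see `None` here.

```rust
// Toy payload type for illustration only.
struct BlindedForwardPayload {
    intro_node_blinding_point: Option<[u8; 33]>,
}

fn is_intro_node_forward(payload: &BlindedForwardPayload) -> bool {
    // Present => we are the introduction node; absent => we are a blinded hop.
    payload.intro_node_blinding_point.is_some()
}

fn main() {
    let intro = BlindedForwardPayload { intro_node_blinding_point: Some([2; 33]) };
    let inner = BlindedForwardPayload { intro_node_blinding_point: None };
    assert!(is_intro_node_forward(&intro));
    assert!(!is_intro_node_forward(&inner));
}
```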
@@ -3228,14 +3242,14 @@ where
                                                // delay) once they've send us a commitment_signed!
                                                PendingHTLCStatus::Forward(info)
                                        },
-                                       Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+                                       Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
                                }
                        },
                        onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
                                match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
                                        new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
                                        Ok(info) => PendingHTLCStatus::Forward(info),
-                                       Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+                                       Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
                                }
                        }
                }
@@ -3970,6 +3984,7 @@ where
                                                });
                                }
                        }
+                       mem::drop(funding_batch_states);
                        for shutdown_result in shutdown_results.drain(..) {
                                self.finish_close_channel(shutdown_result);
                        }
@@ -4303,7 +4318,7 @@ where
                                                                                                                        current_height, self.default_configuration.accept_mpp_keysend)
                                                                                                                {
                                                                                                                        Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
-                                                                                                                       Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+                                                                                                                       Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                }
                                                                                                        },
                                                                                                        _ => panic!(),
@@ -4345,7 +4360,7 @@ where
                                        if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
                                                let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                for forward_info in pending_forwards.drain(..) {
-                                                       match forward_info {
+                                                       let queue_fail_htlc_res = match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                        forward_info: PendingHTLCInfo {
@@ -4364,7 +4379,7 @@ where
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                // Phantom payments are only PendingHTLCRouting::Receive.
                                                                                phantom_shared_secret: None,
-                                                                               blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+                                                                               blinded_failure: blinded.map(|b| b.failure),
                                                                        });
                                                                        let next_blinding_point = blinded.and_then(|b| {
                                                                                let encrypted_tlvs_ss = self.node_signer.ecdh(
@@ -4391,40 +4406,35 @@ where
                                                                                ));
                                                                                continue;
                                                                        }
+                                                                       None
                                                                },
                                                                HTLCForwardInfo::AddHTLC { .. } => {
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
                                                                        log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       if let Err(e) = chan.queue_fail_htlc(
-                                                                               htlc_id, err_packet, &&logger
-                                                                       ) {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                               } else {
-                                                                                       panic!("Stated return value requirements in queue_fail_htlc() were not met");
-                                                                               }
-                                                                               // fail-backs are best-effort, we probably already have one
-                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                               continue;
-                                                                       }
+                                                                       Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
                                                                },
                                                                HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-                                                                       log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                               } else {
-                                                                                       panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
-                                                                               }
-                                                                               // fail-backs are best-effort, we probably already have one
-                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                               continue;
-                                                                       }
+                                                                       log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       let res = chan.queue_fail_malformed_htlc(
+                                                                               htlc_id, failure_code, sha256_of_onion, &&logger
+                                                                       );
+                                                                       Some((res, htlc_id))
                                                                },
+                                                       };
+                                                       if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+                                                               if let Err(e) = queue_fail_htlc_res {
+                                                                       if let ChannelError::Ignore(msg) = e {
+                                                                               log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                       } else {
+                                                                               panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+                                                                       }
+                                                                       // fail-backs are best-effort, we probably already have one
+                                                                       // pending, and if not that's OK, if not, the channel is on
+                                                                       // the chain and sending the HTLC-Timeout is their problem.
+                                                                       continue;
+                                                               }
                                                        }
                                                }
                                        } else {
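The refactor above collects the `Result` from `queue_fail_htlc`/`queue_fail_malformed_htlc` (plus the HTLC id) and handles it once, rather than repeating the Ignore-vs-panic block in each match arm. A minimal standalone sketch of that pattern, with toy types standing in for the real channel, logger, and error enum:

```rust
// Toy stand-ins for the real channel/error types; names are illustrative only.
enum ChannelError { Ignore(String), Close(String) }

struct Channel;
impl Channel {
    fn queue_fail_htlc(&mut self, _htlc_id: u64) -> Result<(), ChannelError> { Ok(()) }
    fn queue_fail_malformed_htlc(&mut self, _htlc_id: u64) -> Result<(), ChannelError> {
        Err(ChannelError::Ignore("already failed".to_owned()))
    }
}

enum ForwardInfo { Fail { htlc_id: u64 }, FailMalformed { htlc_id: u64 } }

fn process(chan: &mut Channel, forwards: Vec<ForwardInfo>) {
    for forward in forwards {
        // Capture the Result once per arm...
        let queue_fail_res = match forward {
            ForwardInfo::Fail { htlc_id } => Some((chan.queue_fail_htlc(htlc_id), htlc_id)),
            ForwardInfo::FailMalformed { htlc_id } =>
                Some((chan.queue_fail_malformed_htlc(htlc_id), htlc_id)),
        };
        // ...and handle failures in a single place instead of duplicating this block.
        if let Some((Err(e), htlc_id)) = queue_fail_res {
            match e {
                // Fail-backs are best-effort, so an Ignore error is only logged.
                ChannelError::Ignore(msg) =>
                    eprintln!("Failed to fail HTLC {} backwards: {}", htlc_id, msg),
                ChannelError::Close(_) =>
                    panic!("queue_fail_{{malformed_}}htlc() must only return Ignore errors"),
            }
        }
    }
}

fn main() {
    let mut chan = Channel;
    process(&mut chan, vec![ForwardInfo::Fail { htlc_id: 0 }, ForwardInfo::FailMalformed { htlc_id: 1 }]);
}
```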
@@ -5995,13 +6005,20 @@ where
        }
 
        fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
                        self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+               .ok_or_else(|| {
+                       let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+                       log_error!(logger, "{}", err_str);
+
+                       APIError::ChannelUnavailable { err: err_str }
+               })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let is_only_peer_channel = peer_state.total_channel_count() == 1;
@@ -6016,9 +6033,19 @@ where
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-                                       &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+                                       &self.logger, accept_0conf).map_err(|e| {
+                                               let err_str = e.to_string();
+                                               log_error!(logger, "{}", err_str);
+
+                                               APIError::ChannelUnavailable { err: err_str }
+                                       })
+                               }
+                       _ => {
+                               let err_str = "No such channel awaiting to be accepted.".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               Err(APIError::APIMisuseError { err: err_str })
                        }
-                       _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
                }?;
 
                if accept_0conf {
@@ -6032,7 +6059,10 @@ where
                                }
                        };
                        peer_state.pending_msg_events.push(send_msg_err_event);
-                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                       let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+                       log_error!(logger, "{}", err_str);
+
+                       return Err(APIError::APIMisuseError { err: err_str });
                } else {
                        // If this peer already has some channels, a new channel won't increase our number of peers
                        // with unfunded channels, so as long as we aren't over the maximum number of unfunded
@@ -6045,7 +6075,10 @@ where
                                        }
                                };
                                peer_state.pending_msg_events.push(send_msg_err_event);
-                               return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+                               let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               return Err(APIError::APIMisuseError { err: err_str });
                        }
                }
 
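The repeated changes in this hunk all follow one shape: format the error message once, log it, then return it inside the `APIError`. A hedged sketch of that shape with toy types (`eprintln!` in place of LDK's logger macros):

```rust
// Toy error type for illustration only.
#[derive(Debug)]
enum APIError { ChannelUnavailable { err: String } }

fn find_peer(known_peers: &[u64], node_id: u64) -> Result<u64, APIError> {
    known_peers.iter().copied().find(|id| *id == node_id).ok_or_else(|| {
        let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", node_id);
        // Logging before returning means node operators see the failure even if
        // the API caller drops the error.
        eprintln!("ERROR: {}", err_str);
        APIError::ChannelUnavailable { err: err_str }
    })
}

fn main() {
    assert!(find_peer(&[1, 2, 3], 9).is_err());
}
```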
@@ -6168,13 +6201,18 @@ where
 
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
                if self.default_configuration.manually_accept_inbound_channels {
+                       let channel_type = channel::channel_type_from_open_channel(
+                                       &msg, &peer_state.latest_features, &self.channel_type_features()
+                               ).map_err(|e|
+                                       MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+                               )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
                                temporary_channel_id: msg.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
                                funding_satoshis: msg.funding_satoshis,
                                push_msat: msg.push_msat,
-                               channel_type: msg.channel_type.clone().unwrap(),
+                               channel_type,
                        }, None));
                        peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
                                open_channel_msg: msg.clone(),
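A hypothetical sketch of why the `.unwrap()` on `msg.channel_type` was replaced: the field is optional in `open_channel`, so the event's channel type has to be derived rather than assumed present. Toy types only; the real `channel_type_from_open_channel` also validates the announced type against both peers' features.

```rust
#[derive(Clone, Debug, PartialEq)]
enum ChannelType { StaticRemoteKey, Anchors }

struct OpenChannelMsg { channel_type: Option<ChannelType> }

fn channel_type_from_open_channel(msg: &OpenChannelMsg) -> Result<ChannelType, String> {
    match &msg.channel_type {
        // An explicitly announced type is used (validation elided here).
        Some(t) => Ok(t.clone()),
        // No announced type: fall back to implicit negotiation from features.
        None => Ok(ChannelType::StaticRemoteKey),
    }
}

fn main() {
    // Previously `msg.channel_type.clone().unwrap()` would panic on this input.
    let msg = OpenChannelMsg { channel_type: None };
    assert_eq!(channel_type_from_open_channel(&msg), Ok(ChannelType::StaticRemoteKey));
}
```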
@@ -8975,13 +9013,7 @@ where
                                let pending_msg_events = &mut peer_state.pending_msg_events;
 
                                peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
-                                               // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-                                               // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
-                                               // worry about closing and removing them.
-                                               debug_assert!(false);
-                                               None
-                                       }
+                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
                                ).for_each(|chan| {
                                        let logger = WithChannelContext::from(&self.logger, &chan.context);
                                        pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
@@ -9327,6 +9359,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
        features.set_channel_type_optional();
        features.set_scid_privacy_optional();
        features.set_zero_conf_optional();
+       features.set_route_blinding_optional();
        if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
                features.set_anchors_zero_fee_htlc_tx_optional();
        }
@@ -9472,6 +9505,7 @@ impl_writeable_tlv_based!(PhantomRouteHints, {
 
 impl_writeable_tlv_based!(BlindedForward, {
        (0, inbound_blinding_point, required),
+       (1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
 });
 
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
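The new `failure` field is written as an odd-typed TLV with a default, so a `BlindedForward` serialized by an older version (which never wrote the field and could only have been the introduction node) still deserializes. A rough sketch of the read-side fallback, using a local enum rather than the `impl_writeable_tlv_based!` machinery:

```rust
// Standalone sketch of the idea behind
// `(1, failure, (default_value, BlindedFailure::FromIntroductionNode))`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum BlindedFailure { FromIntroductionNode, FromBlindedNode }

fn read_failure(field_from_tlv_stream: Option<BlindedFailure>) -> BlindedFailure {
    // Absent field => data written by an older version => intro-node behaviour.
    field_from_tlv_stream.unwrap_or(BlindedFailure::FromIntroductionNode)
}

fn main() {
    // Data written before this change carries no `failure` field at all.
    assert_eq!(read_failure(None), BlindedFailure::FromIntroductionNode);
    assert_eq!(read_failure(Some(BlindedFailure::FromBlindedNode)), BlindedFailure::FromBlindedNode);
}
```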
@@ -10304,6 +10338,7 @@ where
                                                reason: ClosureReason::OutdatedChannelManager,
                                                counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
                                                channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+                                               channel_funding_txo: channel.context.get_funding_txo(),
                                        }, None));
                                        for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
                                                let mut found_htlc = false;
@@ -10357,6 +10392,7 @@ where
                                        reason: ClosureReason::DisconnectedPeer,
                                        counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+                                       channel_funding_txo: channel.context.get_funding_txo(),
                                }, None));
                        } else {
                                log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
@@ -10644,6 +10680,7 @@ where
                                        log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
                                        log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
@@ -12096,8 +12133,8 @@ mod tests {
                let sender_intended_amt_msat = 100;
                let extra_fee_msat = 10;
                let hop_data = msgs::InboundOnionPayload::Receive {
-                       amt_msat: 100,
-                       outgoing_cltv_value: 42,
+                       sender_intended_htlc_amt_msat: 100,
+                       cltv_expiry_height: 42,
                        payment_metadata: None,
                        keysend_preimage: None,
                        payment_data: Some(msgs::FinalOnionHopData {
@@ -12108,7 +12145,7 @@ mod tests {
                // Check that if the amount we received + the penultimate hop extra fee is less than the sender
                // intended amount, we fail the payment.
                let current_height: u32 = node[0].node.best_block.read().unwrap().height();
-               if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+               if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
                        create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
                                sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
                                current_height, node[0].node.default_configuration.accept_mpp_keysend)
@@ -12118,8 +12155,8 @@ mod tests {
 
                // If amt_received + extra_fee is equal to the sender intended amount, we're fine.
                let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
-                       amt_msat: 100,
-                       outgoing_cltv_value: 42,
+                       sender_intended_htlc_amt_msat: 100,
+                       cltv_expiry_height: 42,
                        payment_metadata: None,
                        keysend_preimage: None,
                        payment_data: Some(msgs::FinalOnionHopData {
@@ -12142,8 +12179,8 @@ mod tests {
 
                let current_height: u32 = node[0].node.best_block.read().unwrap().height();
                let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
-                       amt_msat: 100,
-                       outgoing_cltv_value: 22,
+                       sender_intended_htlc_amt_msat: 100,
+                       cltv_expiry_height: 22,
                        payment_metadata: None,
                        keysend_preimage: None,
                        payment_data: Some(msgs::FinalOnionHopData {