X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=456c9da528f5c913645134e4e415b1a39ceb46bd;hb=9db962c7192643d10722b7b675d45068e14407c8;hp=b18e7a8f2df002b17b54d1358f335bcf27647088;hpb=0a0c54cc3122be58a034bcd699e9df34fd734678;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b18e7a8f..456c9da5 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -45,8 +45,8 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -112,6 +112,8 @@ pub(super) enum PendingHTLCRouting {
 		phantom_shared_secret: Option<[u8; 32]>,
 	},
 	ReceiveKeysend {
+		/// This was added in 0.0.116 and will break deserialization on downgrades.
+		payment_data: Option<msgs::FinalOnionHopData>,
 		payment_preimage: PaymentPreimage,
 		payment_metadata: Option<Vec<u8>>,
 		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
@@ -501,9 +503,11 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
-	/// commitment transaction.
-	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+	/// Handle a ChannelMonitorUpdate
+	///
+	/// Note that any such events are lost on shutdown, so in general they must be updates which
+	/// are regenerated on startup.
+	MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
 }
 
 #[derive(Debug)]
@@ -623,7 +627,9 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 	Arc<DefaultRouter<
 		Arc<NetworkGraph<Arc<L>>>,
 		Arc<L>,
-		Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+		Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+		ProbabilisticScoringFeeParameters,
+		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
 	>>,
 	Arc<L>
 >;
@@ -639,7 +645,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
 /// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
 
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
 #[cfg(any(test, feature = "_test_utils"))]
@@ -2338,20 +2344,7 @@ where
 				});
 			},
 			msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => {
-				if payment_data.is_some() && keysend_preimage.is_some() {
-					return Err(ReceiveError {
-						err_code: 0x4000|22,
-						err_data: Vec::new(),
-						msg: "We don't support MPP keysend payments",
-					});
-				} else if let Some(data) = payment_data {
-					PendingHTLCRouting::Receive {
-						payment_data: data,
-						payment_metadata,
-						incoming_cltv_expiry: hop_data.outgoing_cltv_value,
-						phantom_shared_secret,
-					}
-				} else if let Some(payment_preimage) = keysend_preimage {
+				if let Some(payment_preimage) = keysend_preimage {
 					// We need to check that the sender knows the keysend preimage before processing this
 					// payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
 					// could discover the final destination of X, by probing the adjacent nodes on the route
@@ -2365,12 +2358,26 @@ where
 							msg: "Payment preimage didn't match payment hash",
 						});
 					}
-
+					if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() {
+						return Err(ReceiveError {
+							err_code: 0x4000|22,
+							err_data: Vec::new(),
+							msg: "We don't support MPP keysend payments",
+						});
+					}
 					PendingHTLCRouting::ReceiveKeysend {
+						payment_data,
 						payment_preimage,
 						payment_metadata,
 						incoming_cltv_expiry: hop_data.outgoing_cltv_value,
 					}
+				} else if let Some(data) = payment_data {
+					PendingHTLCRouting::Receive {
+						payment_data: data,
+						payment_metadata,
+						incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+						phantom_shared_secret,
+					}
 				} else {
 					return Err(ReceiveError {
 						err_code: 0x4000|0x2000|3,
@@ -2728,10 +2735,9 @@ where
 		let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
 			.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
 		let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
-		if onion_utils::route_size_insane(&onion_payloads) {
-			return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
-		}
-		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+			.map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
 
 		let err: Result<(), _> = loop {
 			let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
@@ -2921,8 +2927,6 @@ where
 	/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
 	/// [`send_payment`] for more information about the risks of duplicate preimage usage.
-	/// Note that `route` must have exactly one path.
-	///
 	/// [`send_payment`]: Self::send_payment
 	pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
@@ -3090,6 +3094,12 @@ where
 			}
 		}
 		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
+			if tx.output.len() > u16::max_value() as usize {
+				return Err(APIError::APIMisuseError {
+					err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+				});
+			}
+
 			let mut output_index = None;
 			let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
 			for (idx, outp) in tx.output.iter().enumerate() {
@@ -3099,11 +3109,6 @@ where
 							err: "Multiple outputs matched the expected script and value".to_owned()
 						});
 					}
-					if idx > u16::max_value() as usize {
-						return Err(APIError::APIMisuseError {
-							err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
-						});
-					}
 					output_index = Some(idx as u16);
 				}
 			}
@@ -3488,16 +3493,19 @@ where
 				(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
 					Some(payment_data), phantom_shared_secret, onion_fields)
 			},
-			PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_metadata, incoming_cltv_expiry } => {
-				let onion_fields = RecipientOnionFields { payment_secret: None, payment_metadata };
+			PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => {
+				let onion_fields = RecipientOnionFields {
+					payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
+					payment_metadata
+				};
 				(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
-					None, None, onion_fields)
+					payment_data, None, onion_fields)
 			},
 			_ => {
 				panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
 			}
 		};
-		let mut claimable_htlc = ClaimableHTLC {
+		let claimable_htlc = ClaimableHTLC {
 			prev_hop: HTLCPreviousHopData {
 				short_channel_id: prev_short_channel_id,
 				outpoint: prev_funding_outpoint,
@@ -3547,13 +3555,11 @@ where
 		}
 
 		macro_rules! check_total_value {
-			($payment_data: expr, $payment_preimage: expr) => {{
+			($purpose: expr) => {{
 				let mut payment_claimable_generated = false;
-				let purpose = || {
-					events::PaymentPurpose::InvoicePayment {
-						payment_preimage: $payment_preimage,
-						payment_secret: $payment_data.payment_secret,
-					}
+				let is_keysend = match $purpose {
+					events::PaymentPurpose::SpontaneousPayment(_) => true,
+					events::PaymentPurpose::InvoicePayment { .. } => false,
 				};
 				let mut claimable_payments = self.claimable_payments.lock().unwrap();
 				if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
@@ -3565,9 +3571,18 @@ where
 					.or_insert_with(|| {
 						committed_to_claimable = true;
 						ClaimablePayment {
-							purpose: purpose(), htlcs: Vec::new(), onion_fields: None,
+							purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
 						}
 					});
+				if $purpose != claimable_payment.purpose {
+					let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
+					log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), log_bytes!(payment_hash.0), log_keysend(!is_keysend));
+					fail_htlc!(claimable_htlc, payment_hash);
+				}
+				if !self.default_configuration.accept_mpp_keysend && is_keysend && !claimable_payment.htlcs.is_empty() {
+					log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash and our config states we don't accept MPP keysend", log_bytes!(payment_hash.0));
+					fail_htlc!(claimable_htlc, payment_hash);
+				}
 				if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
 					if earlier_fields.check_merge(&mut onion_fields).is_err() {
 						fail_htlc!(claimable_htlc, payment_hash);
@@ -3576,38 +3591,27 @@ where
 					claimable_payment.onion_fields = Some(onion_fields);
 				}
 				let ref mut htlcs = &mut claimable_payment.htlcs;
-				if htlcs.len() == 1 {
-					if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
-						log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-						fail_htlc!(claimable_htlc, payment_hash);
-					}
-				}
 				let mut total_value = claimable_htlc.sender_intended_value;
 				let mut earliest_expiry = claimable_htlc.cltv_expiry;
 				for htlc in htlcs.iter() {
 					total_value += htlc.sender_intended_value;
 					earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
-					match &htlc.onion_payload {
-						OnionPayload::Invoice { .. } => {
-							if htlc.total_msat != $payment_data.total_msat {
-								log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-									log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
-								total_value = msgs::MAX_VALUE_MSAT;
-							}
-							if total_value >= msgs::MAX_VALUE_MSAT { break; }
-						},
-						_ => unreachable!(),
+					if htlc.total_msat != claimable_htlc.total_msat {
+						log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+							log_bytes!(payment_hash.0), claimable_htlc.total_msat, htlc.total_msat);
+						total_value = msgs::MAX_VALUE_MSAT;
 					}
+					if total_value >= msgs::MAX_VALUE_MSAT { break; }
 				}
 				// The condition determining whether an MPP is complete must
 				// match exactly the condition used in `timer_tick_occurred`
 				if total_value >= msgs::MAX_VALUE_MSAT {
 					fail_htlc!(claimable_htlc, payment_hash);
-				} else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
+				} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
 					log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
 						log_bytes!(payment_hash.0));
 					fail_htlc!(claimable_htlc, payment_hash);
-				} else if total_value >= $payment_data.total_msat {
+				} else if total_value >= claimable_htlc.total_msat {
 					#[allow(unused_assignments)] {
 						committed_to_claimable = true;
 					}
@@ -3618,7 +3622,7 @@ where
 					new_events.push_back((events::Event::PaymentClaimable {
 						receiver_node_id: Some(receiver_node_id),
 						payment_hash,
-						purpose: purpose(),
+						purpose: $purpose,
 						amount_msat,
 						via_channel_id: Some(prev_channel_id),
 						via_user_channel_id: Some(prev_user_channel_id),
@@ -3666,49 +3670,23 @@ where
 								fail_htlc!(claimable_htlc, payment_hash);
 							}
 						}
-						check_total_value!(payment_data, payment_preimage);
+						let purpose = events::PaymentPurpose::InvoicePayment {
+							payment_preimage: payment_preimage.clone(),
+							payment_secret: payment_data.payment_secret,
+						};
+						check_total_value!(purpose);
 					},
 					OnionPayload::Spontaneous(preimage) => {
-						let mut claimable_payments = self.claimable_payments.lock().unwrap();
-						if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
-							fail_htlc!(claimable_htlc, payment_hash);
-						}
-						match claimable_payments.claimable_payments.entry(payment_hash) {
-							hash_map::Entry::Vacant(e) => {
-								let amount_msat = claimable_htlc.value;
-								claimable_htlc.total_value_received = Some(amount_msat);
-								let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
-								let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
-								e.insert(ClaimablePayment {
-									purpose: purpose.clone(),
-									onion_fields: Some(onion_fields.clone()),
-									htlcs: vec![claimable_htlc],
-								});
-								let prev_channel_id = prev_funding_outpoint.to_channel_id();
-								new_events.push_back((events::Event::PaymentClaimable {
-									receiver_node_id: Some(receiver_node_id),
-									payment_hash,
-									amount_msat,
-									purpose,
-									via_channel_id: Some(prev_channel_id),
-									via_user_channel_id: Some(prev_user_channel_id),
-									claim_deadline,
-									onion_fields: Some(onion_fields),
-								}, None));
-							},
-							hash_map::Entry::Occupied(_) => {
-								log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-								fail_htlc!(claimable_htlc, payment_hash);
-							}
-						}
+						let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+						check_total_value!(purpose);
 					}
 				}
 			},
 			hash_map::Entry::Occupied(inbound_payment) => {
-				if payment_data.is_none() {
+				if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
 					log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
 					fail_htlc!(claimable_htlc, payment_hash);
-				};
+				}
 				let payment_data = payment_data.unwrap();
 				if inbound_payment.get().payment_secret != payment_data.payment_secret {
 					log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
@@ -3718,7 +3696,11 @@ where
 						log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
 					fail_htlc!(claimable_htlc, payment_hash);
 				} else {
-					let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
+					let purpose = events::PaymentPurpose::InvoicePayment {
+						payment_preimage: inbound_payment.get().payment_preimage,
+						payment_secret: payment_data.payment_secret,
+					};
+					let payment_claimable_generated = check_total_value!(purpose);
 					if payment_claimable_generated {
 						inbound_payment.remove_entry();
 					}
@@ -3773,7 +3755,7 @@ where
 
 		for event in background_events.drain(..) {
 			match event {
-				BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
 					// The channel has already been closed, so no use bothering to care about the
 					// monitor updating completing.
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -4257,18 +4239,6 @@ where
 						break;
 					}
 					expected_amt_msat = htlc.total_value_received;
-
-					if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
-						// We don't currently support MPP for spontaneous payments, so just check
-						// that there's one payment here and move on.
-						if sources.len() != 1 {
-							log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
-							debug_assert!(false);
-							valid_mpp = false;
-							break;
-						}
-					}
-
 					claimable_amt_msat += htlc.value;
 				}
 				mem::drop(per_peer_state);
@@ -5693,7 +5663,7 @@ where
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
 				} else { unreachable!(); }
-				self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+				self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
 			}
 			self.finish_force_close_channel(failure);
 		}
@@ -6488,11 +6458,23 @@ where
 		let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
 	}
 
+	fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.temporary_channel_id.clone())), *counterparty_node_id);
+	}
+
 	fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
 	}
 
+	fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.temporary_channel_id.clone())), *counterparty_node_id);
+	}
+
 	fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
@@ -6595,23 +6577,40 @@ where
 			});
 			pending_msg_events.retain(|msg| {
 				match msg {
+					// V1 Channel Establishment
 					&events::MessageSendEvent::SendAcceptChannel { .. } => false,
 					&events::MessageSendEvent::SendOpenChannel { .. } => false,
 					&events::MessageSendEvent::SendFundingCreated { .. } => false,
 					&events::MessageSendEvent::SendFundingSigned { .. } => false,
+					// V2 Channel Establishment
+					&events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
+					&events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
+					// Common Channel Establishment
 					&events::MessageSendEvent::SendChannelReady { .. } => false,
 					&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+					// Interactive Transaction Construction
+					&events::MessageSendEvent::SendTxAddInput { .. } => false,
+					&events::MessageSendEvent::SendTxAddOutput { .. } => false,
+					&events::MessageSendEvent::SendTxRemoveInput { .. } => false,
+					&events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
+					&events::MessageSendEvent::SendTxComplete { .. } => false,
+					&events::MessageSendEvent::SendTxSignatures { .. } => false,
+					&events::MessageSendEvent::SendTxInitRbf { .. } => false,
+					&events::MessageSendEvent::SendTxAckRbf { .. } => false,
+					&events::MessageSendEvent::SendTxAbort { .. } => false,
+					// Channel Operations
 					&events::MessageSendEvent::UpdateHTLCs { .. } => false,
 					&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
 					&events::MessageSendEvent::SendClosingSigned { .. } => false,
 					&events::MessageSendEvent::SendShutdown { .. } => false,
 					&events::MessageSendEvent::SendChannelReestablish { .. } => false,
+					&events::MessageSendEvent::HandleError { .. } => false,
+					// Gossip
 					&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
 					&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
 					&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
 					&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
 					&events::MessageSendEvent::SendChannelUpdate { .. } => false,
-					&events::MessageSendEvent::HandleError { .. } => false,
 					&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
 					&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
 					&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
@@ -6768,6 +6767,60 @@ where
 	fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
 		provided_init_features(&self.default_configuration)
 	}
+
+	fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_add_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddOutput) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_remove_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveInput) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_remove_output(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxRemoveOutput) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_complete(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxComplete) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_init_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxInitRbf) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_ack_rbf(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAckRbf) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
+
+	fn handle_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) {
+		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
+			"Dual-funded channels not supported".to_owned(),
+			msg.channel_id.clone())), *counterparty_node_id);
+	}
 }
 
 /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
@@ -6973,6 +7026,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 		(0, payment_preimage, required),
 		(2, incoming_cltv_expiry, required),
 		(3, payment_metadata, option),
+		(4, payment_data, option), // Added in 0.0.116
 	},
 ;);
 
@@ -7168,8 +7222,10 @@ impl Readable for HTLCSource {
 					return Err(DecodeError::InvalidValue);
 				}
 				if let Some(params) = payment_params.as_mut() {
-					if params.final_cltv_expiry_delta == 0 {
-						params.final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+					if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
+						if final_cltv_expiry_delta == &0 {
+							*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
+						}
 					}
 				}
 				Ok(HTLCSource::OutboundRoute {
@@ -7362,17 +7418,12 @@ where
 			}
 		}
 
-		let background_events = self.pending_background_events.lock().unwrap();
-		(background_events.len() as u64).write(writer)?;
-		for event in background_events.iter() {
-			match event {
-				BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
-					0u8.write(writer)?;
-					funding_txo.write(writer)?;
-					monitor_update.write(writer)?;
-				},
-			}
-		}
+		// LDK versions prior to 0.0.116 wrote the `pending_background_events`
+		// `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+		// the closing monitor updates were always effectively replayed on startup (either directly
+		// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+		// deserialization or, in 0.0.115, by regenerating the monitor update itself).
+		0u64.write(writer)?;
 
 		// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
 		// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
@@ -7687,7 +7738,7 @@ where
 						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
 					if let Some(monitor_update) = monitor_update {
-						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
 					}
 					failed_htlcs.append(&mut new_failed_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
@@ -7762,7 +7813,7 @@ where
 					update_id: CLOSED_CHANNEL_UPDATE_ID,
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 				};
-				pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+				pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
 			}
 		}
 
@@ -7819,13 +7870,11 @@ where
 		for _ in 0..background_event_count {
 			match <u8 as Readable>::read(reader)? {
 				0 => {
-					let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
-					if pending_background_events.iter().find(|e| {
-						let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
-						*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
-					}).is_none() {
-						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
-					}
+					// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
+					// however we really don't (and never did) need them - we regenerate all
+					// on-startup monitor updates.
+					let _: OutPoint = Readable::read(reader)?;
+					let _: ChannelMonitorUpdate = Readable::read(reader)?;
 				}
 				_ => return Err(DecodeError::InvalidValue),
 			}
@@ -8480,13 +8529,26 @@ mod tests {
 
 	#[test]
 	fn test_keysend_dup_payment_hash() {
+		do_test_keysend_dup_payment_hash(false);
+		do_test_keysend_dup_payment_hash(true);
+	}
+
+	fn do_test_keysend_dup_payment_hash(accept_mpp_keysend: bool) {
 		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
 		// outbound regular payment fails as expected.
 		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
 		// fails as expected.
+		// (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
+		// payment fails as expected. When `accept_mpp_keysend` is false, this tests that we
+		// reject MPP keysend payments, since in this case where the payment has no payment
+		// secret, a keysend payment with a duplicate hash is basically an MPP keysend. If
+		// `accept_mpp_keysend` is true, this tests that we only accept MPP keysends with
+		// payment secrets and reject otherwise.
 		let chanmon_cfgs = create_chanmon_cfgs(2);
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let mut mpp_keysend_cfg = test_default_channel_config();
+		mpp_keysend_cfg.accept_mpp_keysend = accept_mpp_keysend;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(mpp_keysend_cfg)]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1);
 		let scorer = test_utils::TestScorer::new();
@@ -8498,12 +8560,12 @@ mod tests {
 
 		// Next, attempt a keysend payment and make sure it fails.
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
+			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
 			final_value_msat: 100_000,
 		};
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &random_seed_bytes
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -8537,7 +8599,7 @@ mod tests {
 		let payment_preimage = PaymentPreimage([42; 32]);
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &random_seed_bytes
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 
 		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -8575,6 +8637,53 @@ mod tests {
 
 		// Finally, succeed the keysend payment.
 		claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+		// To start (3), send a keysend payment but don't claim it.
+		let payment_id_1 = PaymentId([44; 32]);
+		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let event = events.pop().unwrap();
+		let path = vec![&nodes[1]];
+		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+		// Next, attempt a keysend payment and make sure it fails.
+		let route_params = RouteParameters {
+			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+			final_value_msat: 100_000,
+		};
+		let route = find_route(
+			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+		).unwrap();
+		let payment_id_2 = PaymentId([45; 32]);
+		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), payment_id_2).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = events.drain(..).next().unwrap();
+		let payment_event = SendEvent::from_event(ev);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[1], 0);
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
+		check_added_monitors!(nodes[1], 1);
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], payment_hash, true);
+
+		// Finally, claim the original payment.
+		claim_payment(&nodes[0], &expected_route, payment_preimage);
 	}
 
 	#[test]
@@ -8591,7 +8700,7 @@ mod tests {
 		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
 
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
 			final_value_msat: 10_000,
 		};
 		let network_graph = nodes[0].network_graph.clone();
@@ -8600,7 +8709,7 @@ mod tests {
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer, &random_seed_bytes
+			nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -8624,10 +8733,13 @@ mod tests {
 
 	#[test]
 	fn test_keysend_msg_with_secret_err() {
-		// Test that we error as expected if we receive a keysend payment that includes a payment secret.
+		// Test that we error as expected if we receive a keysend payment that includes a payment
+		// secret when we don't support MPP keysend.
+		let mut reject_mpp_keysend_cfg = test_default_channel_config();
+		reject_mpp_keysend_cfg.accept_mpp_keysend = false;
 		let chanmon_cfgs = create_chanmon_cfgs(2);
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(reject_mpp_keysend_cfg)]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
 		let payer_pubkey = nodes[0].node.get_our_node_id();
@@ -8635,7 +8747,7 @@ mod tests {
 		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
 
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
 			final_value_msat: 10_000,
 		};
 		let network_graph = nodes[0].network_graph.clone();
@@ -8644,7 +8756,7 @@ mod tests {
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer, &random_seed_bytes
+			nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -9301,10 +9413,7 @@ pub mod bench {
 
 		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
-		let block = Block {
-			header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-			txdata: vec![tx],
-		};
+		let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
 		Listen::block_connected(&node_a, &block, 1);
 		Listen::block_connected(&node_b, &block, 1);
 
@@ -9345,7 +9454,7 @@ pub mod bench {
 		macro_rules! send_payment {
 			($node_a: expr, $node_b: expr) => {
 				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
-					.with_features($node_b.invoice_features());
+					.with_bolt11_features($node_b.invoice_features()).unwrap();
 				let mut payment_preimage = PaymentPreimage([0; 32]);
 				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
 				payment_count += 1;