X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=fe050de913616f7508a39a6466a669c876949f4c;hb=8f308f98dda28f54e1c7603be5a440a8c0c101c8;hp=d28270db5bb6b49542434064cdd8cbcc8dce6c83;hpb=3fd9fc6fc04ed2ab4b471bbc95a271da4f060b8e;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d28270db..fe050de9 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -63,7 +63,7 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage};
+use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
@@ -457,7 +457,7 @@ impl MsgHandleErrInternal {
 	#[inline]
 	fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
 		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
-		let action = if let (Some(_), ..) = &shutdown_res {
+		let action = if shutdown_res.monitor_update.is_some() {
 			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
 			// should disconnect our peer such that we force them to broadcast their latest
 			// commitment upon reconnecting.
@@ -827,7 +827,8 @@ struct PendingInboundPayment {
 /// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
-/// This is not exported to bindings users as Arcs don't make sense in bindings
+/// This is not exported to bindings users as type aliases aren't supported in most languages.
+#[cfg(not(c_bindings))]
 pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 	Arc<M>,
 	Arc<T>,
@@ -855,7 +856,8 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
-/// This is not exported to bindings users as Arcs don't make sense in bindings
+/// This is not exported to bindings users as type aliases aren't supported in most languages.
+#[cfg(not(c_bindings))]
 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<
 	&'a M,
 	&'b T,
@@ -2600,7 +2602,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let mut shutdown_result = None;
+		let shutdown_result;
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2615,10 +2617,11 @@ where
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 					let funding_txo_opt = chan.context.get_funding_txo();
 					let their_features = &peer_state.latest_features;
-					let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-					let (shutdown_msg, mut monitor_update_opt, htlcs) =
+					let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 						chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 					failed_htlcs = htlcs;
+					shutdown_result = local_shutdown_result;
+					debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
 					// We can send the `shutdown` message before updating the `ChannelMonitor`
 					// here as we don't need the monitor update to complete until we send a
@@ -2646,7 +2649,6 @@ where
 					});
 				}
 				self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-				shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 			}
 		}
 		break;
@@ -2681,11 +2683,11 @@ where
 	/// will be accepted on the given channel, and after additional timeout/the closing of all
 	/// pending HTLCs, the channel will be closed on chain.
 	///
-	///  * If we are the channel initiator, we will pay between our [`Background`] and
-	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
-	///    estimate.
+	///  * If we are the channel initiator, we will pay between our [`ChannelCloseMinimum`] and
+	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
+	///    fee estimate.
 	///  * If our counterparty is the channel initiator, we will require a channel closing
-	///    transaction feerate of at least our [`Background`] feerate or the feerate which
+	///    transaction feerate of at least our [`ChannelCloseMinimum`] feerate or the feerate which
 	///    would appear on a force-closure transaction, whichever is lower. We will allow our
 	///    counterparty to pay as much fee as they'd like, however.
 	///
@@ -2697,8 +2699,8 @@ where
 	/// channel.
 	///
 	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
-	/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
-	/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+	/// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum
+	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
 	/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
 	pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
 		self.close_channel_internal(channel_id, counterparty_node_id, None, None)
@@ -2712,8 +2714,8 @@ where
 	/// the channel being closed or not:
 	///  * If we are the channel initiator, we will pay at least this feerate on the closing
 	///    transaction. The upper-bound is set by
-	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
-	///    estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
+	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
+	///    fee estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
 	///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
 	///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
 	///    will appear on a force-closure transaction, whichever is lower).
@@ -2731,29 +2733,27 @@ where
 	/// channel.
 	///
 	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
-	/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
-	/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
 	/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
 	pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
 		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
-		let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-		log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
-		for htlc_source in failed_htlcs.drain(..) {
+		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -2761,7 +2761,7 @@ where
 			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 		let mut shutdown_results = Vec::new();
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
+		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3906,7 +3906,7 @@ where
 	/// Return values are identical to [`Self::funding_transaction_generated`], respective to
 	/// each individual channel and transaction output.
 	///
-	/// Do NOT broadcast the funding transaction yourself. This batch funding transcaction
+	/// Do NOT broadcast the funding transaction yourself. This batch funding transaction
 	/// will only be broadcast when we have safely received and persisted the counterparty's
 	/// signature for each channel.
 	///
@@ -3960,7 +3960,7 @@ where
 				btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
 			}
 		});
-		for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() {
+		for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
 			result = result.and_then(|_| self.funding_transaction_generated_intern(
 				temporary_channel_id,
 				counterparty_node_id,
@@ -4817,8 +4817,8 @@ where
 		PersistenceNotifierGuard::optionally_notify(self, || {
 			let mut should_persist = NotifyOption::SkipPersistNoEvents;
 
-			let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
-			let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
+			let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
+			let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);
 
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
@@ -4828,9 +4828,9 @@ where
 					|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
 				) {
 					let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-						min_mempool_feerate
+						anchor_feerate
 					} else {
-						normal_feerate
+						non_anchor_feerate
 					};
 					let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
 					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
@@ -4866,8 +4866,8 @@ where
 		PersistenceNotifierGuard::optionally_notify(self, || {
 			let mut should_persist = NotifyOption::SkipPersistNoEvents;
 
-			let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
-			let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
+			let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
+			let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);
 
 			let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
 			let mut timed_out_mpp_htlcs = Vec::new();
@@ -4914,9 +4914,9 @@ where
 					match phase {
 						ChannelPhase::Funded(chan) => {
 							let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-								min_mempool_feerate
+								anchor_feerate
 							} else {
-								normal_feerate
+								non_anchor_feerate
 							};
 							let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
 							if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
@@ -6452,22 +6452,20 @@ where
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut shutdown_result = None;
-		let unbroadcasted_batch_funding_txid;
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 			})?;
-		let (tx, chan_option) = {
+		let (tx, chan_option, shutdown_result) = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-						unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 						if let Some(msg) = closing_signed {
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 								node_id: counterparty_node_id.clone(),
@@ -6480,8 +6478,8 @@ where
 							// also implies there are no pending HTLCs left on the channel, so we can
 							// fully delete it from tracking (the channel monitor is still around to
 							// watch for old state broadcasts)!
-							(tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-						} else { (tx, None) }
+							(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+						} else { (tx, None, shutdown_result) }
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6503,7 +6501,6 @@ where
 				});
 			}
 			self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-			shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 		}
 		mem::drop(per_peer_state);
 		if let Some(shutdown_result) = shutdown_result {
@@ -7236,15 +7233,18 @@ where
 			peer_state.channel_by_id.retain(|channel_id, phase| {
 				match phase {
 					ChannelPhase::Funded(chan) => {
-						let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
 						match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-							Ok((msg_opt, tx_opt)) => {
+							Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
 								if let Some(msg) = msg_opt {
 									has_update = true;
 									pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 										node_id: chan.context.get_counterparty_node_id(), msg,
 									});
 								}
+								debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+								if let Some(shutdown_result) = shutdown_result_opt {
+									shutdown_results.push(shutdown_result);
+								}
 								if let Some(tx) = tx_opt {
 									// We're done with this channel. We got a closing_signed and sent back
 									// a closing_signed with a closing transaction to broadcast.
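
Most of the hunks above are mechanical fallout from one refactor: `ShutdownResult` changes from a positional tuple into a struct with named fields, so call sites stop destructuring `(monitor_update_option, failed_htlcs, unbroadcasted_batch_funding_txid)` and instead read `shutdown_res.monitor_update`, `shutdown_res.dropped_outbound_htlcs`, and `shutdown_res.unbroadcasted_batch_funding_txid`. A minimal, self-contained sketch of the shape change follows; the field names come straight from the accesses visible in this diff, while the stand-in type aliases and exact field types are assumptions inferred from the destructuring patterns shown above, not rust-lightning's real definitions:

```rust
// Stand-in aliases (assumptions) so this sketch compiles on its own; the real
// types live elsewhere in rust-lightning.
type PublicKey = [u8; 33];
type OutPoint = ([u8; 32], u16);
type ChannelMonitorUpdate = Vec<u8>;
type HTLCSource = u64;
type PaymentHash = [u8; 32];
type ChannelId = [u8; 32];
type Txid = [u8; 32];

// Before: a positional tuple, addressed as `shutdown_res.0` or via
// destructuring, as in the removed lines above.
type OldShutdownResult = (
	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,  // closing monitor update
	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, // HTLCs to fail backwards
	Option<Txid>,                                         // unbroadcasted batch funding txid
);

// After: the same data behind named fields, as read by `finish_close_channel`
// and the deserialization path in this diff.
struct ShutdownResult {
	monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	unbroadcasted_batch_funding_txid: Option<Txid>,
}

fn main() {
	let old: OldShutdownResult = (None, Vec::new(), None);
	// The refactor is a pure renaming of positions to fields.
	let new = ShutdownResult {
		monitor_update: old.0,
		dropped_outbound_htlcs: old.1,
		unbroadcasted_batch_funding_txid: old.2,
	};
	assert!(new.monitor_update.is_none());
	assert!(new.dropped_outbound_htlcs.is_empty());
	assert!(new.unbroadcasted_batch_funding_txid.is_none());
}
```

Beyond readability, the named form lets `get_shutdown`, `closing_signed`, and `maybe_propose_closing_signed` hand back an `Option<ShutdownResult>` directly instead of each call site rebuilding the tuple, which is why a `debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown())` consistency check appears next to each of those calls in this diff.
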
@@ -7259,7 +7259,6 @@ where
 									log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 									self.tx_broadcaster.broadcast_transactions(&[&tx]);
 									update_maps_on_chan_removal!(self, &chan.context);
-									shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
 									false
 								} else { true }
 							},
@@ -7300,7 +7299,7 @@ where
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
 			// timer_tick_occurred, guaranteeing we're running normally.
-			if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+			if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
@@ -7325,6 +7324,13 @@ where
 	/// the node must be announced. Otherwise, there is no way to find a path to the introduction
 	/// node in order to send the [`InvoiceRequest`].
 	///
+	/// # Limitations
+	///
+	/// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
+	/// reply path.
+	///
+	/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+	///
 	/// [`Offer`]: crate::offers::offer::Offer
 	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
 	pub fn create_offer_builder(
@@ -7358,20 +7364,31 @@ where
 	/// invoice. If abandoned, or an invoice isn't received before expiration, the payment will fail
 	/// with an [`Event::InvoiceRequestFailed`].
 	///
+	/// If `max_total_routing_fee_msat` is not specified, the default from
+	/// [`RouteParameters::from_payment_params_and_value`] is applied.
+	///
 	/// # Privacy
 	///
 	/// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
-	/// the introduction node and a derived payer id for sender privacy. As such, currently, the
+	/// the introduction node and a derived payer id for payer privacy. As such, currently, the
 	/// node must be announced. Otherwise, there is no way to find a path to the introduction node
 	/// in order to send the [`Bolt12Invoice`].
 	///
+	/// # Limitations
+	///
+	/// Requires a direct connection to an introduction node in the responding
+	/// [`Bolt12Invoice::payment_paths`].
+	///
 	/// # Errors
 	///
 	/// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
 	/// or if `amount_msats` is invalid.
 	///
+	/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+	///
 	/// [`Refund`]: crate::offers::refund::Refund
 	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
 	pub fn create_refund_builder(
 		&self, description: String, amount_msats: u64, absolute_expiry: Duration,
 		payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
@@ -7411,6 +7428,9 @@ where
 	/// - `amount_msats` if overpaying what is required for the given `quantity` is desired, and
 	/// - `payer_note` for [`InvoiceRequest::payer_note`].
 	///
+	/// If `max_total_routing_fee_msat` is not specified, the default from
+	/// [`RouteParameters::from_payment_params_and_value`] is applied.
+	///
 	/// # Payment
 	///
 	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
@@ -7421,6 +7441,19 @@ where
 	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
 	/// payment will fail with an [`Event::InvoiceRequestFailed`].
 	///
+	/// # Privacy
+	///
+	/// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`]
+	/// as the introduction node and a derived payer id for payer privacy. As such, currently, the
+	/// node must be announced. Otherwise, there is no way to find a path to the introduction node
+	/// in order to send the [`Bolt12Invoice`].
+	///
+	/// # Limitations
+	///
+	/// Requires a direct connection to an introduction node in [`Offer::paths`] or to
+	/// [`Offer::signing_pubkey`], if empty. A similar restriction applies to the responding
+	/// [`Bolt12Invoice::payment_paths`].
+	///
 	/// # Errors
 	///
 	/// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
@@ -7431,6 +7464,7 @@ where
 	/// [`InvoiceRequest::payer_note`]: crate::offers::invoice_request::InvoiceRequest::payer_note
 	/// [`InvoiceRequestBuilder`]: crate::offers::invoice_request::InvoiceRequestBuilder
 	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
 	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
 	pub fn pay_for_offer(
 		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
@@ -7469,11 +7503,11 @@ where
 
 		let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
 		if offer.paths().is_empty() {
-			let message = PendingOnionMessage {
-				contents: OffersMessage::InvoiceRequest(invoice_request),
-				destination: Destination::Node(offer.signing_pubkey()),
-				reply_path: Some(reply_path),
-			};
+			let message = new_pending_onion_message(
+				OffersMessage::InvoiceRequest(invoice_request),
+				Destination::Node(offer.signing_pubkey()),
+				Some(reply_path),
+			);
 			pending_offers_messages.push(message);
 		} else {
 			// Send as many invoice requests as there are paths in the offer (with an upper bound).
@@ -7481,11 +7515,11 @@ where
 			// one invoice for a given payment id will be paid, even if more than one is received.
 			const REQUEST_LIMIT: usize = 10;
 			for path in offer.paths().into_iter().take(REQUEST_LIMIT) {
-				let message = PendingOnionMessage {
-					contents: OffersMessage::InvoiceRequest(invoice_request.clone()),
-					destination: Destination::BlindedPath(path.clone()),
-					reply_path: Some(reply_path.clone()),
-				};
+				let message = new_pending_onion_message(
+					OffersMessage::InvoiceRequest(invoice_request.clone()),
+					Destination::BlindedPath(path.clone()),
+					Some(reply_path.clone()),
+				);
 				pending_offers_messages.push(message);
 			}
 		}
@@ -7500,6 +7534,13 @@ where
 	/// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding
 	/// [`PaymentPreimage`].
 	///
+	/// # Limitations
+	///
+	/// Requires a direct connection to an introduction node in [`Refund::paths`] or to
+	/// [`Refund::payer_id`], if empty. This request is best effort; an invoice will be sent to each
+	/// node meeting the aforementioned criteria, but there's no guarantee that they will be
+	/// received and no retries will be made.
+	///
 	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
 	pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
 		let expanded_key = &self.inbound_payment_key;
@@ -7531,19 +7572,19 @@ where
 
 		let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
 		if refund.paths().is_empty() {
-			let message = PendingOnionMessage {
-				contents: OffersMessage::Invoice(invoice),
-				destination: Destination::Node(refund.payer_id()),
-				reply_path: Some(reply_path),
-			};
+			let message = new_pending_onion_message(
+				OffersMessage::Invoice(invoice),
+				Destination::Node(refund.payer_id()),
+				Some(reply_path),
+			);
 			pending_offers_messages.push(message);
 		} else {
 			for path in refund.paths() {
-				let message = PendingOnionMessage {
-					contents: OffersMessage::Invoice(invoice.clone()),
-					destination: Destination::BlindedPath(path.clone()),
-					reply_path: Some(reply_path.clone()),
-				};
+				let message = new_pending_onion_message(
+					OffersMessage::Invoice(invoice.clone()),
+					Destination::BlindedPath(path.clone()),
+					Some(reply_path.clone()),
+				);
 				pending_offers_messages.push(message);
 			}
 		}
@@ -8930,10 +8971,10 @@ where
 					match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
 						Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
 						Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
-							InvoiceError::from_str("Failed signing invoice")
+							InvoiceError::from_string("Failed signing invoice".to_string())
 						)),
 						Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
-							InvoiceError::from_str("Failed invoice signature verification")
+							InvoiceError::from_string("Failed invoice signature verification".to_string())
 						)),
 				});
 				match response {
@@ -8949,7 +8990,7 @@ where
 			OffersMessage::Invoice(invoice) => {
 				match invoice.verify(expanded_key, secp_ctx) {
 					Err(()) => {
-						Some(OffersMessage::InvoiceError(InvoiceError::from_str("Unrecognized invoice")))
+						Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
 					},
 					Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
 						Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
@@ -8957,7 +8998,7 @@ where
 					Ok(payment_id) => {
 						if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
 							log_trace!(self.logger, "Failed paying invoice: {:?}", e);
-							Some(OffersMessage::InvoiceError(InvoiceError::from_str(&format!("{:?}", e))))
+							Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
 						} else {
 							None
 						}
@@ -9923,16 +9964,16 @@ where
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 						&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 				}
-				let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-				if batch_funding_txid.is_some() {
+				let mut shutdown_result = channel.context.force_shutdown(true);
+				if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 					return Err(DecodeError::InvalidValue);
 				}
-				if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+				if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 					close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 						counterparty_node_id, funding_txo, update
 					});
 				}
-				failed_htlcs.append(&mut new_failed_htlcs);
+				failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
 				channel_closures.push_back((events::Event::ChannelClosed {
 					channel_id: channel.context.channel_id(),
 					user_channel_id: channel.context.get_user_id(),
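
The other recurring change, visible in the `pay_for_offer` and `request_refund_payment` hunks above, replaces `PendingOnionMessage { .. }` struct literals with the `new_pending_onion_message` constructor imported in the first hunk, giving every call site a single construction path. A rough sketch of what such a constructor looks like, using hypothetical stand-in types rather than the real ones from `crate::onion_message` (the real constructor presumably also has to cover an alternate `c_bindings` representation of `PendingOnionMessage`, which a plain struct literal could not):

```rust
// Stand-ins (assumptions) so the sketch is self-contained.
#[derive(Clone)]
struct BlindedPath;
enum Destination {
	Node([u8; 33]),
	BlindedPath(BlindedPath),
}

struct PendingOnionMessage<T> {
	contents: T,
	destination: Destination,
	reply_path: Option<BlindedPath>,
}

// One construction path for every call site, mirroring the argument order used
// throughout the diff: contents, destination, reply path.
fn new_pending_onion_message<T>(
	contents: T, destination: Destination, reply_path: Option<BlindedPath>,
) -> PendingOnionMessage<T> {
	PendingOnionMessage { contents, destination, reply_path }
}

fn main() {
	// Shape of the `offer.paths().is_empty()` branch above: send the request
	// directly to the recipient's node id, with a reply path for the response.
	let message = new_pending_onion_message(
		"invoice-request",            // stands in for OffersMessage::InvoiceRequest(..)
		Destination::Node([2u8; 33]), // stands in for offer.signing_pubkey()
		Some(BlindedPath),            // stands in for the derived reply path
	);
	assert!(matches!(message.destination, Destination::Node(_)));
	assert!(message.reply_path.is_some());
	let _ = message.contents;
}
```
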