X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=9e573a705f7e51452d675424dd2bab348c0aff1b;hb=642913c586fc71b0e413532e7dedcd19cfd4815c;hp=a106537ff66f4d6b9fe76d9e3268125296e480fb;hpb=920d96edb6595289902f287419de2d002e2dc2ee;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a106537f..9e573a70 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -58,7 +58,7 @@ use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
 use crate::ln::wire::Encode;
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
 use crate::offers::invoice_error::InvoiceError;
@@ -105,7 +105,7 @@ use core::time::Duration;
 use core::ops::Deref;
 
 // Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+pub use crate::ln::outbound_payment::{Bolt12PaymentError, PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
 use crate::ln::script::ShutdownScript;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
@@ -636,7 +636,7 @@ impl MsgHandleErrInternal {
 				err: msg,
 				action: msgs::ErrorAction::IgnoreError,
 			},
-			ChannelError::Close(msg) => LightningError {
+			ChannelError::Close((msg, _reason)) => LightningError {
 				err: msg.clone(),
 				action: msgs::ErrorAction::SendErrorMessage {
 					msg: msgs::ErrorMessage {
@@ -962,6 +962,11 @@ pub(super) struct InboundChannelRequest {
 /// accepted. An unaccepted channel that exceeds this limit will be abandoned.
 const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
 
+/// The number of blocks of historical feerate estimates we keep around and consider when deciding
+/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
+/// after startup before we consider force-closing channels for having too-low fees.
+pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
+
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
 /// actually ours and not some duplicate HTLC sent to us by a node along the route.
 ///
@@ -2098,6 +2103,21 @@ where
 	/// Tracks the message events that are to be broadcasted when we are connected to some peer.
 	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
 
+	/// We only want to force-close our channels on peers based on stale feerates when we're
+	/// confident the feerate on the channel is *really* stale, not one that just recently became
+	/// stale. Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`]
+	/// blocks (after startup completed) here, and only force-close when channels have a lower
+	/// feerate than we predicted at any point in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
+	///
+	/// We only keep this in memory as we assume any feerates we receive immediately after startup
+	/// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
+	/// actions for a day anyway.
+	///
+	/// The first element in the pair is the
+	/// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
+	/// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
+	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
+
 	entropy_source: ES,
 	node_signer: NS,
 	signer_provider: SP,
@@ -2446,11 +2466,10 @@ macro_rules! convert_chan_phase_err {
 			ChannelError::Ignore(msg) => {
 				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
 			},
-			ChannelError::Close(msg) => {
+			ChannelError::Close((msg, reason)) => {
 				let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
 				log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
 				update_maps_on_chan_removal!($self, $channel.context);
-				let reason = ClosureReason::ProcessingError { err: msg.clone() };
 				let shutdown_res = $channel.context.force_shutdown(true, reason);
 				let err = MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
@@ -2876,6 +2895,8 @@ where
 			pending_offers_messages: Mutex::new(Vec::new()),
 			pending_broadcast_messages: Mutex::new(Vec::new()),
 
+			last_days_feerates: Mutex::new(VecDeque::new()),
+
 			entropy_source,
 			node_signer,
 			signer_provider,
@@ -3167,7 +3188,7 @@ where
 					}
 				} else {
 					let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
-					shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
+					shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
 				}
 			},
 			hash_map::Entry::Vacant(_) => {
@@ -3336,7 +3357,7 @@ where
 		let closure_reason = if let Some(peer_msg) = peer_msg {
 			ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
 		} else {
-			ClosureReason::HolderForceClosed
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
 		};
 		let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
 		if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
@@ -3975,14 +3996,43 @@ where
 		self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
 	}
 
-	pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
+	/// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`.
+	///
+	/// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested
+	/// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this
+	/// fails or if the encoded `payment_id` is not recognized. The latter may happen once the
+	/// payment is no longer tracked because it was attempted after:
+	/// - an invoice for the `payment_id` was already paid,
+	/// - one full [timer tick] has elapsed since initially requesting the invoice when paying an
+	///   offer, or
+	/// - the refund corresponding to the invoice has already expired.
+	///
+	/// To retry the payment, request another invoice using a new `payment_id`.
+	///
+	/// Attempting to pay the same invoice twice while the first payment is still pending will
+	/// result in a [`Bolt12PaymentError::DuplicateInvoice`].
+	///
+	/// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] is used to indicate
+	/// whether or not the payment was successful.
+	///
+	/// [timer tick]: Self::timer_tick_occurred
+	pub fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice) -> Result<(), Bolt12PaymentError> {
+		let secp_ctx = &self.secp_ctx;
+		let expanded_key = &self.inbound_payment_key;
+		match invoice.verify(expanded_key, secp_ctx) {
+			Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
+			Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
+		}
+	}
+
+	fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
 		let best_block_height = self.best_block.read().unwrap().height;
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 		self.pending_outbound_payments
 			.send_payment_for_bolt12_invoice(
 				invoice, payment_id, &self.router, self.list_usable_channels(),
-				|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
-				best_block_height, &self.logger, &self.pending_events,
+				|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+				&self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
 				|args| self.send_payment_along_path(args)
 			)
 	}
@@ -4201,10 +4251,9 @@ where
 			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
 				macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
 					let counterparty;
-					let err = if let ChannelError::Close(msg) = $err {
+					let err = if let ChannelError::Close((msg, reason)) = $err {
 						let channel_id = $chan.context.channel_id();
 						counterparty = chan.context.get_counterparty_node_id();
-						let reason = ClosureReason::ProcessingError { err: msg.clone() };
 						let shutdown_res = $chan.context.force_shutdown(false, reason);
 						MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
 					} else { unreachable!(); };
@@ -4217,7 +4266,7 @@ where
 				match find_funding_output(&chan, &funding_transaction) {
 					Ok(found_funding_txo) => funding_txo = found_funding_txo,
 					Err(err) => {
-						let chan_err = ChannelError::Close(err.to_owned());
+						let chan_err = ChannelError::close(err.to_owned());
 						let api_err = APIError::APIMisuseError { err: err.to_owned() };
 						return close_chan!(chan_err, api_err, chan);
 					},
@@ -5511,7 +5560,7 @@ where
 						log_error!(logger, "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
 						update_maps_on_chan_removal!(self, &context);
-						shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
+						shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
 						pending_msg_events.push(MessageSendEvent::HandleError {
 							node_id: counterparty_node_id,
 							action: msgs::ErrorAction::SendErrorMessage {
@@ -6110,21 +6159,13 @@ where
 		}
 		if valid_mpp {
 			for htlc in sources.drain(..) {
-				let prev_hop_chan_id = htlc.prev_hop.channel_id;
-				if let Err((pk, err)) = self.claim_funds_from_hop(
+				self.claim_funds_from_hop(
 					htlc.prev_hop, payment_preimage,
 					|_, definitely_duplicate| {
 						debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
 						Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
 					}
-				) {
-					if let msgs::ErrorAction::IgnoreError = err.err.action {
-						// We got a temporary failure updating monitor, but will claim the
-						// HTLC when the monitor updating is restored (or on chain).
-						let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
-						log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
-					} else { errs.push((pk, err)); }
-				}
+				);
 			}
 		}
 		if !valid_mpp {
@@ -6146,9 +6187,10 @@ where
 		}
 	}
 
-	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
-		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
-	-> Result<(), (PublicKey, MsgHandleErrInternal)> {
+	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+		&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+		completion_action: ComplFunc,
+	) {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
 		// If we haven't yet run background events assume we're still deserializing and shouldn't
@@ -6210,7 +6252,7 @@ where
 				let action = if let Some(action) = completion_action(None, true) {
 					action
 				} else {
-					return Ok(());
+					return;
 				};
 				mem::drop(peer_state_lock);
@@ -6226,7 +6268,7 @@ where
 					} else {
 						debug_assert!(false,
 							"Duplicate claims should always free another channel immediately");
-						return Ok(());
+						return;
 					};
 					if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
 						let mut peer_state = peer_state_mtx.lock().unwrap();
@@ -6251,7 +6293,7 @@ where
 							}
 						}
 					}
-					return Ok(());
+					return;
 				}
 			}
 		}
@@ -6299,7 +6341,6 @@ where
 		// generally always allowed to be duplicative (and it's specifically noted in
 		// `PaymentForwarded`).
 		self.handle_monitor_update_completion_actions(completion_action(None, false));
-		Ok(())
 	}
 
 	fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -6332,7 +6373,7 @@ where
 				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
 				#[cfg(debug_assertions)]
 				let claiming_chan_funding_outpoint = hop_data.outpoint;
-				let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+				self.claim_funds_from_hop(hop_data, payment_preimage,
 					|htlc_claim_value_msat, definitely_duplicate| {
 						let chan_to_release =
 							if let Some(node_id) = next_channel_counterparty_node_id {
@@ -6426,10 +6467,6 @@ where
 						})
 					}
 				});
-				if let Err((pk, err)) = res {
-					let result: Result<(), _> = Err(err);
-					let _ = handle_error!(self, result, pk);
-				}
 			},
 		}
 	}
@@ -7016,7 +7053,7 @@ where
 			},
 			Some(mut phase) => {
 				let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
-				let err = ChannelError::Close(err_msg);
+				let err = ChannelError::close(err_msg);
 				return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
 			},
 			None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
@@ -7031,7 +7068,7 @@ where
 					// `update_maps_on_chan_removal`), we'll remove the existing channel
 					// from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
 					// on the channel.
-					let err = ChannelError::Close($err.to_owned());
+					let err = ChannelError::close($err.to_owned());
 					chan.unset_funding_info(msg.temporary_channel_id);
 					return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
 				} } }
@@ -7116,7 +7153,7 @@ where
 					} else { unreachable!(); }
 					Ok(())
 				} else {
-					let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+					let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
 					// We weren't able to watch the channel to begin with, so no
 					// updates should be made on it. Previously, full_stack_target
 					// found an (unreachable) panic when the monitor update contained
@@ -7187,7 +7224,7 @@ where
 					Ok(())
 				} else {
-					try_chan_phase_entry!(self, Err(ChannelError::Close(
+					try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
 				}
 			},
@@ -7302,7 +7339,7 @@ where
 						(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
 					} else { (tx, None, shutdown_result) }
 				} else {
-					return try_chan_phase_entry!(self, Err(ChannelError::Close(
+					return try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
 				}
 			},
@@ -7402,7 +7439,7 @@ where
 					}
 					try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
 				} else {
-					return try_chan_phase_entry!(self, Err(ChannelError::Close(
+					return try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
 				}
 			},
@@ -7446,7 +7483,7 @@ where
 					next_user_channel_id = chan.context.get_user_id();
 					res
 				} else {
-					return try_chan_phase_entry!(self, Err(ChannelError::Close(
+					return try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
 				}
 			},
@@ -7477,7 +7514,7 @@ where
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 					try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
 				} else {
-					return try_chan_phase_entry!(self, Err(ChannelError::Close(
+					return try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
 				}
 			},
@@ -7500,13 +7537,13 @@ where
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
 				if (msg.failure_code & 0x8000) == 0 {
-					let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+					let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
 					try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
 				}
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 					try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
 				} else {
-					return try_chan_phase_entry!(self, Err(ChannelError::Close(
+					return try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
 				}
 				Ok(())
@@ -7536,7 +7573,7 @@ where
 					}
 					Ok(())
 				} else {
-					return try_chan_phase_entry!(self, Err(ChannelError::Close(
+					return try_chan_phase_entry!(self, Err(ChannelError::close(
 						"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
channel!".into())), chan_phase_entry); } }, @@ -7732,7 +7769,7 @@ where } htlcs_to_fail } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7758,7 +7795,7 @@ where let logger = WithChannelContext::from(&self.logger, &chan.context, None); try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7793,7 +7830,7 @@ where update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()), }); } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7845,7 +7882,7 @@ where } } } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_update for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7907,7 +7944,7 @@ where } need_lnd_workaround } else { - return try_chan_phase_entry!(self, Err(ChannelError::Close( + return try_chan_phase_entry!(self, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry); } }, @@ -7998,7 +8035,7 @@ where let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event { reason } else { - ClosureReason::HolderForceClosed + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } }; failed_channels.push(chan.context.force_shutdown(false, reason.clone())); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { @@ -9175,7 +9212,38 @@ where self, || -> NotifyOption { NotifyOption::DoPersist }); *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))); + let mut min_anchor_feerate = None; + let mut min_non_anchor_feerate = None; + if self.background_events_processed_since_startup.load(Ordering::Relaxed) { + // If we're past the startup phase, update our feerate cache + let mut last_days_feerates = self.last_days_feerates.lock().unwrap(); + if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS { + last_days_feerates.pop_front(); + } + let anchor_feerate = self.fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee); + let non_anchor_feerate = self.fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee); + last_days_feerates.push_back((anchor_feerate, non_anchor_feerate)); + if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS { + min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied(); + min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied(); + } + } + + self.do_chain_event(Some(height), |channel| { + let logger = WithChannelContext::from(&self.logger, &channel.context, None); + if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + if let Some(feerate) = 
+					channel.check_for_stale_feerate(&logger, feerate)?;
+				}
+			} else {
+				if let Some(feerate) = min_non_anchor_feerate {
+					channel.check_for_stale_feerate(&logger, feerate)?;
+				}
+			}
+			channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
+		});
 
 		macro_rules! max_time {
 			($timestamp: expr) => {
@@ -10221,42 +10289,45 @@ where
 				};
 
 				match response {
-					Ok(invoice) => return responder.respond(OffersMessage::Invoice(invoice)),
-					Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())),
+					Ok(invoice) => responder.respond(OffersMessage::Invoice(invoice)),
+					Err(error) => responder.respond(OffersMessage::InvoiceError(error.into())),
 				}
 			},
 			OffersMessage::Invoice(invoice) => {
-				let response = invoice
-					.verify(expanded_key, secp_ctx)
-					.map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned()))
-					.and_then(|payment_id| {
+				let result = match invoice.verify(expanded_key, secp_ctx) {
+					Ok(payment_id) => {
 						let features = self.bolt12_invoice_features();
 						if invoice.invoice_features().requires_unknown_bits_from(&features) {
 							Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
+						} else if self.default_configuration.manually_handle_bolt12_invoices {
+							let event = Event::InvoiceReceived { payment_id, invoice, responder };
+							self.pending_events.lock().unwrap().push_back((event, None));
+							return ResponseInstruction::NoResponse;
 						} else {
-							self.send_payment_for_bolt12_invoice(&invoice, payment_id)
+							self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id)
 								.map_err(|e| {
 									log_trace!(self.logger, "Failed paying invoice: {:?}", e);
 									InvoiceError::from_string(format!("{:?}", e))
 								})
 						}
-					});
+					},
+					Err(()) => Err(InvoiceError::from_string("Unrecognized invoice".to_owned())),
+				};
 
-				match (responder, response) {
-					(Some(responder), Err(e)) => responder.respond(OffersMessage::InvoiceError(e)),
-					(None, Err(_)) => {
-						log_trace!(
-							self.logger,
-							"A response was generated, but there is no reply_path specified for sending the response."
-						);
-						return ResponseInstruction::NoResponse;
-					}
-					_ => return ResponseInstruction::NoResponse,
+				match result {
+					Ok(()) => ResponseInstruction::NoResponse,
+					Err(e) => match responder {
+						Some(responder) => responder.respond(OffersMessage::InvoiceError(e)),
+						None => {
+							log_trace!(self.logger, "No reply path for sending invoice error: {:?}", e);
+							ResponseInstruction::NoResponse
						},
+					},
 				}
 			},
 			OffersMessage::InvoiceError(invoice_error) => {
 				log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
-				return ResponseInstruction::NoResponse;
+				ResponseInstruction::NoResponse
 			},
 		}
 	}
@@ -11994,6 +12065,8 @@ where
 			node_signer: args.node_signer,
 			signer_provider: args.signer_provider,
 
+			last_days_feerates: Mutex::new(VecDeque::new()),
+
 			logger: args.logger,
 			default_configuration: args.default_config,
 		};
@@ -12531,7 +12604,7 @@ mod tests {
 		nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
 
 		// Confirm that the channel_update was not sent immediately to node[1] but was cached.
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -12590,7 +12663,7 @@ mod tests { nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); { // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been @@ -13327,7 +13400,7 @@ mod tests { nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); { let txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1);
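
A few reviewer notes on the behavior introduced above, with illustrative sketches that are not part of the patch itself.

First, the new `last_days_feerates` cache (see the hunks at @@ -2098 and @@ -9175) is a fixed-size rolling window over the last FEERATE_TRACKING_BLOCKS (144) connected blocks, and channels are only checked for stale feerates against the minimum estimate across a completely filled window. A minimal standalone sketch of that rolling-minimum logic, using a hypothetical FeerateWindow type rather than the actual ChannelManager field:

use std::collections::VecDeque;

const FEERATE_TRACKING_BLOCKS: usize = 144;

/// Hypothetical standalone version of the (anchor, non-anchor) feerate window.
struct FeerateWindow {
	last_days_feerates: VecDeque<(u32, u32)>,
}

impl FeerateWindow {
	fn new() -> Self {
		Self { last_days_feerates: VecDeque::new() }
	}

	/// Record the estimates (in sat per 1000 weight) seen at a newly connected
	/// block; once a full day of blocks has been observed, return the minimum
	/// (anchor, non-anchor) estimates over the whole window.
	fn on_block(&mut self, anchor: u32, non_anchor: u32) -> Option<(u32, u32)> {
		if self.last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
			self.last_days_feerates.pop_front();
		}
		self.last_days_feerates.push_back((anchor, non_anchor));
		if self.last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
			let min_anchor = self.last_days_feerates.iter().map(|(f, _)| *f).min()?;
			let min_non_anchor = self.last_days_feerates.iter().map(|(_, f)| *f).min()?;
			Some((min_anchor, min_non_anchor))
		} else {
			// Still warming up (e.g. shortly after startup, when estimates may
			// be bogus): don't consider force-closing on feerates yet.
			None
		}
	}
}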
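
Second, the BOLT12 send path changes shape: `send_payment_for_bolt12_invoice` becomes public, takes only the invoice, and recovers the `payment_id` by verifying the invoice's `payer_metadata`, while the old `(invoice, payment_id)` entry point survives as the private `send_payment_for_verified_bolt12_invoice`. A sketch of caller-side handling of the new result, using the `Bolt12PaymentError` re-export added at @@ -105 (the handler function itself is hypothetical):

use lightning::ln::channelmanager::Bolt12PaymentError;

/// Hypothetical handler for the result of the now-public
/// `ChannelManager::send_payment_for_bolt12_invoice(&invoice)`.
fn handle_bolt12_send_result(res: Result<(), Bolt12PaymentError>) {
	match res {
		Ok(()) => {
			// Payment queued; the final outcome is surfaced later via
			// Event::PaymentSent or Event::PaymentFailed.
		},
		Err(Bolt12PaymentError::UnexpectedInvoice) => {
			// payer_metadata failed to verify, or the payment_id is no longer
			// tracked: request a fresh invoice under a new payment_id to retry.
		},
		Err(Bolt12PaymentError::DuplicateInvoice) => {
			// A payment for this invoice is already pending; nothing to do.
		},
		Err(e) => {
			// Any other Bolt12PaymentError variant.
			eprintln!("failed paying BOLT12 invoice: {:?}", e);
		},
	}
}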
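
Third, `ClosureReason::HolderForceClosed` gains a `broadcasted_latest_txn: Option<bool>` field, an `Option` presumably so that events serialized before this change can still be read back as `None`. Downstream event handlers matching on the old unit-like variant need updating; a sketch of matching the extended variant (`describe_closure` is illustrative, not library API):

use lightning::events::ClosureReason;

/// Hypothetical event-handler snippet; `reason` would come from an
/// Event::ChannelClosed.
fn describe_closure(reason: &ClosureReason) -> String {
	match reason {
		ClosureReason::HolderForceClosed { broadcasted_latest_txn } => {
			match broadcasted_latest_txn {
				Some(true) => "we force-closed and broadcast our latest commitment transaction".into(),
				Some(false) => "we force-closed without broadcasting (e.g. an abandoned pending channel)".into(),
				// Expected when deserializing events written before the field existed.
				None => "we force-closed (broadcast status unknown)".into(),
			}
		},
		other => format!("{}", other),
	}
}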
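
Finally, the repeated `ChannelError::Close(msg)` to `ChannelError::close(msg)` substitutions reflect that the `Close` variant now carries a `(String, ClosureReason)` tuple. Judging by the removed `ClosureReason::ProcessingError { err: msg.clone() }` lines, the lowercase helper presumably packages the message with that default reason. A standalone sketch of the inferred pattern, with stub types standing in for the real ones in lightning/src/ln/channel.rs (which may differ in detail):

// Stub types for illustration only; not the lightning crate's definitions.
#[derive(Debug)]
enum ClosureReason {
	ProcessingError { err: String },
}

#[derive(Debug)]
enum ChannelError {
	Close((String, ClosureReason)),
}

impl ChannelError {
	/// Inferred convenience constructor pairing the error message with a
	/// default ProcessingError closure reason.
	fn close(err: String) -> Self {
		ChannelError::Close((err.clone(), ClosureReason::ProcessingError { err }))
	}
}

fn main() {
	let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
	println!("{:?}", e);
}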