X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=d0badb8b2cc7ca8e0a3688740c2a24aefe2c5f6d;hb=a1c30041360905e5edcb343447cf1d5872188914;hp=75cd72b8f6897416fd482362263af139201fd459;hpb=3c0420c39e8bf2b9b2d654a73fcf43899646859b;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 75cd72b8..d0badb8b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -43,12 +43,12 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
 use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
@@ -63,7 +63,8 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
 use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
@@ -550,9 +551,8 @@ impl Into<u16> for FailureCode {
 
 struct MsgHandleErrInternal {
	err: msgs::LightningError,
-	chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
+	closes_channel: bool,
	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
-	channel_capacity: Option<u64>,
 }
 impl MsgHandleErrInternal {
	#[inline]
@@ -567,17 +567,16 @@ impl MsgHandleErrInternal {
				},
			},
		},
-			chan_id: None,
+			closes_channel: false,
			shutdown_finish: None,
-			channel_capacity: None,
		}
	}
	#[inline]
	fn from_no_close(err: msgs::LightningError) -> Self {
-		Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
+		Self { err, closes_channel: false, shutdown_finish: None }
	}
	#[inline]
-	fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+	fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
		let action = if shutdown_res.monitor_update.is_some() {
			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
@@ -589,9 +588,8 @@ impl MsgHandleErrInternal {
		};
		Self {
			err: LightningError { err, action },
-			chan_id: Some((channel_id, user_channel_id)),
+			closes_channel: true,
			shutdown_finish: Some((shutdown_res, channel_update)),
-			channel_capacity: Some(channel_capacity)
		}
	}
	#[inline]
@@ -622,14 +620,13 @@ impl MsgHandleErrInternal {
				},
			},
		},
-			chan_id: None,
+			closes_channel: false,
			shutdown_finish: None,
-			channel_capacity: None,
		}
	}

	fn closes_channel(&self) -> bool {
-		self.chan_id.is_some()
+		self.closes_channel
	}
 }

@@ -1961,30 +1958,27 @@ macro_rules! handle_error {
		match $internal {
			Ok(msg) => Ok(msg),
-			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
+			Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
				let mut msg_events = Vec::with_capacity(2);

				if let Some((shutdown_res, update_option)) = shutdown_finish {
+					let counterparty_node_id = shutdown_res.counterparty_node_id;
+					let channel_id = shutdown_res.channel_id;
+					let logger = WithContext::from(
+						&$self.logger, Some(counterparty_node_id), Some(channel_id),
+					);
+					log_error!(logger, "Force-closing channel: {}", err.err);
+
					$self.finish_close_channel(shutdown_res);
					if let Some(update) = update_option {
						msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
							msg: update
						});
					}
-					if let Some((channel_id, user_channel_id)) = chan_id {
-						$self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
-							channel_id, user_channel_id,
-							reason: ClosureReason::ProcessingError { err: err.err.clone() },
-							counterparty_node_id: Some($counterparty_node_id),
-							channel_capacity_sats: channel_capacity,
-						}, None));
-					}
+				} else {
+					log_error!($self.logger, "Got non-closing error: {}", err.err);
				}
-
-				let logger = WithContext::from(
-					&$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
-				);
-				log_error!(logger, "{}", err.err);

				if let msgs::ErrorAction::IgnoreError = err.action {
				} else {
					msg_events.push(events::MessageSendEvent::HandleError {
@@ -2044,12 +2038,11 @@ macro_rules! convert_chan_phase_err {
			let logger = WithChannelContext::from(&$self.logger, &$channel.context);
			log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
			update_maps_on_chan_removal!($self, $channel.context);
-			let shutdown_res = $channel.context.force_shutdown(true);
-			let user_id = $channel.context.get_user_id();
-			let channel_capacity_satoshis = $channel.context.get_value_satoshis();
-
-			(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
-				shutdown_res, $channel_update, channel_capacity_satoshis))
+			let reason = ClosureReason::ProcessingError { err: msg.clone() };
+			let shutdown_res = $channel.context.force_shutdown(true, reason);
+			let err =
+				MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
+			(true, err)
		},
	}
 };
@@ -2706,26 +2699,6 @@ where
			.collect()
	}

-	/// Helper function that issues the channel close events
-	fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
-		let mut pending_events_lock = self.pending_events.lock().unwrap();
-		match context.unbroadcasted_funding() {
-			Some(transaction) => {
-				pending_events_lock.push_back((events::Event::DiscardFunding {
-					channel_id: context.channel_id(), transaction
-				}, None));
-			},
-			None => {},
-		}
-		pending_events_lock.push_back((events::Event::ChannelClosed {
-			channel_id: context.channel_id(),
-			user_channel_id: context.get_user_id(),
-			reason: closure_reason,
-			counterparty_node_id: Some(context.get_counterparty_node_id()),
-			channel_capacity_sats: Some(context.get_value_satoshis()),
-		}, None));
-	}
-
	fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

@@ -2767,9 +2740,8 @@ where
						peer_state_lock, peer_state, per_peer_state, chan);
				}
			} else {
-				self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
				let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
-				shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+				shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
			}
		},
		hash_map::Entry::Vacant(_) => {
@@ -2866,7 +2838,9 @@ where
		let logger = WithContext::from(
			&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
		);
-		log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+
+		log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
+			shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
@@ -2891,8 +2865,7 @@ where
				let mut peer_state = peer_state_mutex.lock().unwrap();
				if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
					update_maps_on_chan_removal!(self, &chan.context());
-					self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
-					shutdown_results.push(chan.context_mut().force_shutdown(false));
+					shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
				}
			}
			has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
@@ -2902,6 +2875,24 @@ where
				"Closing a batch where all channels have completed initial monitor update",
			);
		}
+
+		{
+			let mut pending_events = self.pending_events.lock().unwrap();
+			pending_events.push_back((events::Event::ChannelClosed {
+				channel_id: shutdown_res.channel_id,
+				user_channel_id: shutdown_res.user_channel_id,
+				reason: shutdown_res.closure_reason,
+				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+				channel_funding_txo: shutdown_res.channel_funding_txo,
+			}, None));
+
+			if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
+				pending_events.push_back((events::Event::DiscardFunding {
+					channel_id: shutdown_res.channel_id, transaction
+				}, None));
+			}
+		}
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
@@ -2924,17 +2915,16 @@ where
		let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
		if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
			log_error!(logger, "Force-closing channel {}", channel_id);
-			self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
			let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
			mem::drop(peer_state);
			mem::drop(per_peer_state);
			match chan_phase {
				ChannelPhase::Funded(mut chan) => {
-					self.finish_close_channel(chan.context.force_shutdown(broadcast));
+					self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
					(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
				},
				ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
-					self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+					self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
					// Unfunded channel has no update
					(None, chan_phase.context().get_counterparty_node_id())
				},
@@ -3245,14 +3235,14 @@ where
				// delay) once they've send us a commitment_signed!
				PendingHTLCStatus::Forward(info)
			},
-			Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+			Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
		}
	},
	onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
		match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
			new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
			Ok(info) => PendingHTLCStatus::Forward(info),
-			Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+			Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
		}
	}
 }
@@ -3755,7 +3745,7 @@ where
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		let funding_txo;
-		let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+		let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
				funding_txo = find_funding_output(&chan, &funding_transaction)?;

@@ -3763,10 +3753,9 @@ where
				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
					.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
						let channel_id = chan.context.channel_id();
-						let user_id = chan.context.get_user_id();
-						let shutdown_res = chan.context.force_shutdown(false);
-						let channel_capacity = chan.context.get_value_satoshis();
-						(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
+						let reason = ClosureReason::ProcessingError { err: msg.clone() };
+						let shutdown_res = chan.context.force_shutdown(false, reason);
+						(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
					} else { unreachable!(); });
				match funding_res {
					Ok(funding_msg) => (chan, funding_msg),
@@ -3806,8 +3795,20 @@ where
			},
			hash_map::Entry::Vacant(e) => {
				let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-				if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
-					panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
+				match outpoint_to_peer.entry(funding_txo) {
+					hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
+					hash_map::Entry::Occupied(o) => {
+						let err = format!(
+							"An existing channel using outpoint {} is open with peer {}",
+							funding_txo, o.get()
+						);
+						mem::drop(outpoint_to_peer);
+						mem::drop(peer_state_lock);
+						mem::drop(per_peer_state);
+						let reason = ClosureReason::ProcessingError { err: err.clone() };
+						self.finish_close_channel(chan.context.force_shutdown(true, reason));
+						return Err(APIError::ChannelUnavailable { err });
+					}
				}
				e.insert(ChannelPhase::UnfundedOutboundV1(chan));
			}
@@ -3971,8 +3972,8 @@ where
						.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
						.map(|mut chan| {
							update_maps_on_chan_removal!(self, &chan.context());
-							self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
-							shutdown_results.push(chan.context_mut().force_shutdown(false));
+							let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+							shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
						});
				}
			}
@@ -4309,7 +4310,7 @@ where
											current_height, self.default_configuration.accept_mpp_keysend)
										{
											Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
-											Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+											Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
										}
									},
									_ => panic!(),
@@ -4351,7 +4352,7 @@ where
				if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
					let logger = WithChannelContext::from(&self.logger, &chan.context);
					for forward_info in pending_forwards.drain(..) {
-						match forward_info {
+						let queue_fail_htlc_res = match forward_info {
							HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
								prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
								forward_info: PendingHTLCInfo {
@@ -4397,40 +4398,35 @@ where
									));
									continue;
								}
+								None
							},
							HTLCForwardInfo::AddHTLC { .. } => {
								panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
							},
							HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
								log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-								if let Err(e) = chan.queue_fail_htlc(
-									htlc_id, err_packet, &&logger
-								) {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-									} else {
-										panic!("Stated return value requirements in queue_fail_htlc() were not met");
-									}
-									// fail-backs are best-effort, we probably already have one
-									// pending, and if not that's OK, if not, the channel is on
-									// the chain and sending the HTLC-Timeout is their problem.
-									continue;
-								}
+								Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
							},
							HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-								log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-								if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-									} else {
-										panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
-									}
-									// fail-backs are best-effort, we probably already have one
-									// pending, and if not that's OK, if not, the channel is on
-									// the chain and sending the HTLC-Timeout is their problem.
-									continue;
-								}
+								log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+								let res = chan.queue_fail_malformed_htlc(
+									htlc_id, failure_code, sha256_of_onion, &&logger
+								);
+								Some((res, htlc_id))
							},
+						};
+						if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+							if let Err(e) = queue_fail_htlc_res {
+								if let ChannelError::Ignore(msg) = e {
+									log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+								} else {
+									panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+								}
+								// fail-backs are best-effort, we probably already have one
+								// pending, and if not that's OK, if not, the channel is on
+								// the chain and sending the HTLC-Timeout is their problem.
+								continue;
+							}
						}
					}
				} else {
@@ -4894,8 +4890,7 @@ where
				log_error!(logger,
					"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
				update_maps_on_chan_removal!(self, &context);
-				self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
-				shutdown_channels.push(context.force_shutdown(false));
+				shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
				pending_msg_events.push(MessageSendEvent::HandleError {
					node_id: counterparty_node_id,
					action: msgs::ErrorAction::SendErrorMessage {
@@ -6002,13 +5997,20 @@ where
	}

	fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let peers_without_funded_channels =
			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+			.ok_or_else(|| {
+				let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+				log_error!(logger, "{}", err_str);
+
+				APIError::ChannelUnavailable { err: err_str }
+			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		let is_only_peer_channel = peer_state.total_channel_count() == 1;
@@ -6023,9 +6025,19 @@ where
				InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
					counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
					&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-					&self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+					&self.logger, accept_0conf).map_err(|e| {
+						let err_str = e.to_string();
+						log_error!(logger, "{}", err_str);
+
+						APIError::ChannelUnavailable { err: err_str }
+					})
			}
-			_ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
+			_ => {
+				let err_str = "No such channel awaiting to be accepted.".to_owned();
+				log_error!(logger, "{}", err_str);
+
+				Err(APIError::APIMisuseError { err: err_str })
+			}
		}?;

		if accept_0conf {
@@ -6039,7 +6051,10 @@ where
				}
			};
			peer_state.pending_msg_events.push(send_msg_err_event);
-			return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+			log_error!(logger, "{}", err_str);
+
+			return Err(APIError::APIMisuseError { err: err_str });
		} else {
			// If this peer already has some channels, a new channel won't increase our number of peers
			// with unfunded channels, so as long as we aren't over the maximum number of unfunded
@@ -6052,7 +6067,10 @@ where
				}
			};
			peer_state.pending_msg_events.push(send_msg_err_event);
-			return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+			let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+			log_error!(logger, "{}", err_str);
+
+			return Err(APIError::APIMisuseError { err: err_str });
		}
	}

@@ -6175,13 +6193,18 @@ where

		// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
		if self.default_configuration.manually_accept_inbound_channels {
+			let channel_type = channel::channel_type_from_open_channel(
+					&msg, &peer_state.latest_features, &self.channel_type_features()
+				).map_err(|e|
+					MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+				)?;
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push_back((events::Event::OpenChannelRequest {
				temporary_channel_id: msg.temporary_channel_id.clone(),
				counterparty_node_id: counterparty_node_id.clone(),
				funding_satoshis: msg.funding_satoshis,
				push_msat: msg.push_msat,
-				channel_type: msg.channel_type.clone().unwrap(),
+				channel_type,
			}, None));
			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
				open_channel_msg: msg.clone(),
@@ -6381,7 +6404,7 @@ where
			let res = chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
			match res {
-				Ok((chan, monitor)) => {
+				Ok((mut chan, monitor)) => {
					if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
						// We really should be able to insert here without doing a second
						// lookup, but sadly rust stdlib doesn't currently allow keeping
@@ -6393,6 +6416,11 @@ where
						Ok(())
					} else {
						let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+						// We weren't able to watch the channel to begin with, so no
+						// updates should be made on it. Previously, full_stack_target
+						// found an (unreachable) panic when the monitor update contained
+						// within `shutdown_finish` was applied.
+						chan.unset_funding_info(msg.channel_id);
						return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
					}
				},
@@ -6515,9 +6543,8 @@ where
						let context = phase.context_mut();
						let logger = WithChannelContext::from(&self.logger, context);
						log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-						self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
						let mut chan = remove_channel_phase!(self, chan_phase_entry);
-						finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+						finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
					},
				}
			} else {
@@ -6586,7 +6613,6 @@ where
							msg: update
						});
					}
-					self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
				}
				mem::drop(per_peer_state);
				if let Some(shutdown_result) = shutdown_result {
@@ -7238,13 +7264,12 @@ where
				let pending_msg_events = &mut peer_state.pending_msg_events;
				if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
					if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-						failed_channels.push(chan.context.force_shutdown(false));
+						failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
								msg: update
							});
						}
-						self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
						pending_msg_events.push(events::MessageSendEvent::HandleError {
							node_id: chan.context.get_counterparty_node_id(),
							action: msgs::ErrorAction::DisconnectPeer {
@@ -7431,8 +7456,6 @@ where
					});
				}

-				self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-
				log_info!(logger, "Broadcasting {}", log_tx!(tx));
				self.tx_broadcaster.broadcast_transactions(&[&tx]);
				update_maps_on_chan_removal!(self, &chan.context);
@@ -7756,15 +7779,15 @@ where
				let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
					.map_err(|_| Bolt12SemanticError::MissingPaths)?;

-				#[cfg(not(feature = "no-std"))]
+				#[cfg(feature = "std")]
				let builder = refund.respond_using_derived_keys(
					payment_paths, payment_hash, expanded_key, entropy
				)?;
-				#[cfg(feature = "no-std")]
+				#[cfg(not(feature = "std"))]
				let created_at = Duration::from_secs(
					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
				);
-				#[cfg(feature = "no-std")]
+				#[cfg(not(feature = "std"))]
				let builder = refund.respond_using_derived_keys_no_std(
					payment_paths, payment_hash, created_at, expanded_key, entropy
				)?;
@@ -8445,14 +8468,13 @@ where
						update_maps_on_chan_removal!(self, &channel.context);
						// It looks like our counterparty went on-chain or funding transaction was
						// reorged out of the main chain. Close the channel.
-						failed_channels.push(channel.context.force_shutdown(true));
+						let reason_message = format!("{}", reason);
+						failed_channels.push(channel.context.force_shutdown(true, reason));
						if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
								msg: update
							});
						}
-						let reason_message = format!("{}", reason);
-						self.issue_channel_close_events(&channel.context, reason);
						pending_msg_events.push(events::MessageSendEvent::HandleError {
							node_id: channel.context.get_counterparty_node_id(),
							action: msgs::ErrorAction::DisconnectPeer {
@@ -8850,8 +8872,7 @@ where
					};
					// Clean up for removal.
					update_maps_on_chan_removal!(self, &context);
-					self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
-					failed_channels.push(context.force_shutdown(false));
+					failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
					false
				});
				// Note that we don't bother generating any events for pre-accept channels -
@@ -8984,13 +9005,7 @@ where
				let pending_msg_events = &mut peer_state.pending_msg_events;
				peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-					if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
-						// Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-						// (so won't be recovered after a crash), they shouldn't exist here and we would never need to
-						// worry about closing and removing them.
-						debug_assert!(false);
-						None
-					}
+					if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
				).for_each(|chan| {
					let logger = WithChannelContext::from(&self.logger, &chan.context);
					pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
@@ -9209,17 +9224,17 @@ where
					},
				};

-				#[cfg(feature = "no-std")]
+				#[cfg(not(feature = "std"))]
				let created_at = Duration::from_secs(
					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
				);

				if invoice_request.keys.is_some() {
-					#[cfg(not(feature = "no-std"))]
+					#[cfg(feature = "std")]
					let builder = invoice_request.respond_using_derived_keys(
						payment_paths, payment_hash
					);
-					#[cfg(feature = "no-std")]
+					#[cfg(not(feature = "std"))]
					let builder = invoice_request.respond_using_derived_keys_no_std(
						payment_paths, payment_hash, created_at
					);
@@ -9228,9 +9243,9 @@ where
						Err(error) => Some(OffersMessage::InvoiceError(error.into())),
					}
				} else {
-					#[cfg(not(feature = "no-std"))]
+					#[cfg(feature = "std")]
					let builder = invoice_request.respond_with(payment_paths, payment_hash);
-					#[cfg(feature = "no-std")]
+					#[cfg(not(feature = "std"))]
					let builder = invoice_request.respond_with_no_std(
						payment_paths, payment_hash, created_at
					);
@@ -10297,7 +10312,7 @@ where
						log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
					}
-					let mut shutdown_result = channel.context.force_shutdown(true);
+					let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
						return Err(DecodeError::InvalidValue);
					}
@@ -10313,6 +10328,7 @@ where
						reason: ClosureReason::OutdatedChannelManager,
						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+						channel_funding_txo: channel.context.get_funding_txo(),
					}, None));
					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
						let mut found_htlc = false;
@@ -10359,13 +10375,14 @@ where
					// If we were persisted and shut down while the initial ChannelMonitor persistence
					// was in-progress, we never broadcasted the funding transaction and can still
					// safely discard the channel.
-					let _ = channel.context.force_shutdown(false);
+					let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
					channel_closures.push_back((events::Event::ChannelClosed {
						channel_id: channel.context.channel_id(),
						user_channel_id: channel.context.get_user_id(),
						reason: ClosureReason::DisconnectedPeer,
						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+						channel_funding_txo: channel.context.get_funding_txo(),
					}, None));
				} else {
					log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
@@ -12105,8 +12122,8 @@ mod tests {
		let sender_intended_amt_msat = 100;
		let extra_fee_msat = 10;
		let hop_data = msgs::InboundOnionPayload::Receive {
-			amt_msat: 100,
-			outgoing_cltv_value: 42,
+			sender_intended_htlc_amt_msat: 100,
+			cltv_expiry_height: 42,
			payment_metadata: None, keysend_preimage: None,
			payment_data: Some(msgs::FinalOnionHopData {
@@ -12117,7 +12134,7 @@ mod tests {
		// Check that if the amount we received + the penultimate hop extra fee is less than the sender
		// intended amount, we fail the payment.
		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
-		if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+		if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
			create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
				sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
				current_height, node[0].node.default_configuration.accept_mpp_keysend)
@@ -12127,8 +12144,8 @@ mod tests {
		// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
		let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
-			amt_msat: 100,
-			outgoing_cltv_value: 42,
+			sender_intended_htlc_amt_msat: 100,
+			cltv_expiry_height: 42,
			payment_metadata: None, keysend_preimage: None,
			payment_data: Some(msgs::FinalOnionHopData {
@@ -12151,8 +12168,8 @@ mod tests {
		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
-			amt_msat: 100,
-			outgoing_cltv_value: 22,
+			sender_intended_htlc_amt_msat: 100,
+			cltv_expiry_height: 22,
			payment_metadata: None, keysend_preimage: None,
			payment_data: Some(msgs::FinalOnionHopData {
@@ -12510,7 +12527,7 @@ pub mod bench {
	let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
	let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
	let scorer = RwLock::new(test_utils::TestScorer::new());
-	let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+	let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
	let mut config: UserConfig = Default::default();
	config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
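
The pattern running through this diff: `force_shutdown` now takes the `ClosureReason` and returns it inside `ShutdownResult`, so `finish_close_channel` becomes the single place that emits `ChannelClosed` (plus `DiscardFunding` when the funding transaction was never broadcast), replacing the per-call-site `issue_channel_close_events` helper. Below is a minimal, self-contained Rust sketch of that flow; `ShutdownResult`, `ClosureReason`, `Event`, and `Manager` here are simplified stand-ins for illustration, not the real LDK definitions.

// Sketch only: mocked-up versions of types that live in lightning::ln::channel
// and lightning::events in the real codebase.
use std::collections::VecDeque;
use std::sync::Mutex;

#[derive(Debug)]
#[allow(dead_code)]
enum ClosureReason {
    HolderForceClosed,
    ProcessingError { err: String },
}

// After this patch, ShutdownResult carries everything needed to build the
// ChannelClosed event, so callers no longer thread user_channel_id/capacity
// through MsgHandleErrInternal and friends.
struct ShutdownResult {
    channel_id: [u8; 32],
    user_channel_id: u128,
    channel_capacity_satoshis: u64,
    closure_reason: ClosureReason,
    unbroadcasted_funding_tx: Option<Vec<u8>>, // raw tx bytes as a stand-in
}

#[derive(Debug)]
enum Event {
    ChannelClosed {
        channel_id: [u8; 32],
        user_channel_id: u128,
        reason: ClosureReason,
        channel_capacity_sats: u64,
    },
    DiscardFunding { channel_id: [u8; 32] },
}

struct Manager {
    pending_events: Mutex<VecDeque<Event>>,
}

impl Manager {
    // Mirrors the reworked finish_close_channel: events are generated here,
    // from the ShutdownResult alone, at the one point where the channel is
    // actually torn down.
    fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
        let mut pending_events = self.pending_events.lock().unwrap();
        pending_events.push_back(Event::ChannelClosed {
            channel_id: shutdown_res.channel_id,
            user_channel_id: shutdown_res.user_channel_id,
            reason: shutdown_res.closure_reason,
            channel_capacity_sats: shutdown_res.channel_capacity_satoshis,
        });
        // A funding tx that was never broadcast should be discarded, not spent.
        if shutdown_res.unbroadcasted_funding_tx.is_some() {
            pending_events.push_back(Event::DiscardFunding {
                channel_id: shutdown_res.channel_id,
            });
        }
    }
}

fn main() {
    let manager = Manager { pending_events: Mutex::new(VecDeque::new()) };
    // A force_shutdown(...) call now supplies the ClosureReason and gets it
    // back inside the ShutdownResult; the single emission point consumes it.
    manager.finish_close_channel(ShutdownResult {
        channel_id: [0x42; 32],
        user_channel_id: 1,
        channel_capacity_satoshis: 100_000,
        closure_reason: ClosureReason::HolderForceClosed,
        unbroadcasted_funding_tx: None,
    });
    println!("{:?}", manager.pending_events.lock().unwrap());
}

Centralizing emission this way means a `ShutdownResult` cannot be dropped without its `ChannelClosed` event, and no call site can emit the event twice, which is why each hunk above simply deletes its `issue_channel_close_events` call and passes the reason into `force_shutdown` instead.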