X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=bdd07ddee9861e5454eac9f63f902cdef316d5d4;hb=8866ed35330bae1af2237c1951d9c4025938aa65;hp=91ce2e5a47402c7d05bd885ce9bb602da88b7a2c;hpb=2b898a3385d226f961ef045560e94e64195bbd4f;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 91ce2e5a..bdd07dde 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -55,7 +55,7 @@ use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs};
 use crate::ln::wire::Encode;
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, WriteableEcdsaChannelSigner};
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
@@ -659,23 +659,23 @@ impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
 
 /// State we hold per-peer.
-pub(super) struct PeerState<Signer: ChannelSigner> {
+pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// `channel_id` -> `Channel`.
 	///
 	/// Holds all funded channels where the peer is the counterparty.
-	pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
+	pub(super) channel_by_id: HashMap<[u8; 32], Channel<SP>>,
 	/// `temporary_channel_id` -> `OutboundV1Channel`.
 	///
 	/// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
 	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
 	/// `channel_by_id`.
-	pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<Signer>>,
+	pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<SP>>,
 	/// `temporary_channel_id` -> `InboundV1Channel`.
 	///
 	/// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
 	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
 	/// `channel_by_id`.
-	pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<Signer>>,
+	pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<SP>>,
 	/// `temporary_channel_id` -> `InboundChannelRequest`.
 	///
 	/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
@@ -721,7 +721,7 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	is_connected: bool,
 }
 
-impl <Signer: ChannelSigner> PeerState<Signer> {
+impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
 	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
 	/// If true is passed for `require_disconnected`, the function will return false if we haven't
 	/// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`.
@@ -1146,9 +1146,9 @@ where
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	#[cfg(not(any(test, feature = "_test_utils")))]
-	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
+	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
 	#[cfg(any(test, feature = "_test_utils"))]
-	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
+	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
 
 	/// The set of events which we need to give to the user to handle. In some cases an event may
 	/// require some further action after the user handles it (currently only blocking a monitor
@@ -1594,11 +1594,13 @@ impl ChannelDetails {
 		self.short_channel_id.or(self.outbound_scid_alias)
 	}
 
-	fn from_channel_context<Signer: WriteableEcdsaChannelSigner, F: Deref>(
-		context: &ChannelContext<Signer>, best_block_height: u32, latest_features: InitFeatures,
+	fn from_channel_context<SP: Deref, F: Deref>(
+		context: &ChannelContext<SP>, best_block_height: u32, latest_features: InitFeatures,
 		fee_estimator: &LowerBoundedFeeEstimator<F>
 	) -> Self
-	where F::Target: FeeEstimator
+	where
+		SP::Target: SignerProvider,
+		F::Target: FeeEstimator
 	{
 		let balance = context.get_available_balances(fee_estimator);
 		let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
@@ -2299,7 +2301,7 @@ where
 		Ok(temporary_channel_id)
 	}
 
-	fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+	fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
 		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
@@ -2425,7 +2427,7 @@ where
 	}
 
 	/// Helper function that issues the channel close events
-	fn issue_channel_close_events(&self, context: &ChannelContext<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
+	fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
 		let mut pending_events_lock = self.pending_events.lock().unwrap();
 		match context.unbroadcasted_funding() {
 			Some(transaction) => {
@@ -2914,9 +2916,9 @@ where
 					short_channel_id, amt_to_forward, outgoing_cltv_value
 				}, ..
 			} => {
-				let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+				let next_packet_pk = onion_utils::next_hop_pubkey(&self.secp_ctx,
 					msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
-				(short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+				(short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_packet_pk))
 			},
 			// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
 			// inbound channel's state.
@@ -3115,7 +3117,7 @@ where
 	///
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
-	fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+	fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 		if !chan.context.should_announce() {
 			return Err(LightningError {
 				err: "Cannot broadcast a channel_update for a private channel".to_owned(),
@@ -3140,7 +3142,7 @@ where
 	///
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
-	fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+	fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
 		let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
@@ -3150,7 +3152,7 @@ where
 		self.get_channel_update_for_onion(short_channel_id, chan)
 	}
 
-	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
 		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
 		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
@@ -3444,7 +3446,7 @@ where
 
 	/// Handles the generation of a funding transaction, optionally (for tests) with a function
 	/// which checks the correctness of the funding transaction given the associated channel.
-	fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+	fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
 		&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
 	) -> Result<(), APIError> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -4362,7 +4364,7 @@ where
 		let _ = self.process_background_events();
 	}
 
-	fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
+	fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
 		if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
 		// If the feerate has decreased by less than half, don't bother
 		if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
@@ -4521,7 +4523,7 @@ where
 			let process_unfunded_channel_tick = |
 				chan_id: &[u8; 32],
-				chan_context: &mut ChannelContext<<SP::Target as SignerProvider>::Signer>,
+				chan_context: &mut ChannelContext<SP>,
 				unfunded_chan_context: &mut UnfundedChannelContext,
 				pending_msg_events: &mut Vec<MessageSendEvent>,
 			| {
@@ -4711,7 +4713,7 @@ where
 	///
 	/// This is for failures on the channel on which the HTLC was *received*, not failures
 	/// forwarding
-	fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
+	fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<SP>) -> (u16, Vec<u8>) {
 		// We can't be sure what SCID was used when relaying inbound towards us, so we have to
 		// guess somewhat. If its a public channel, we figure best to just use the real SCID (as
 		// we're not leaking that we have a channel with the counterparty), otherwise we try to use
@@ -4731,7 +4733,7 @@ where
 
 	/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
 	/// that we want to return and a channel.
-	fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
+	fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<SP>) -> (u16, Vec<u8>) {
 		debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
 		if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
 			let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
@@ -5194,7 +5196,7 @@ where
 	/// Handles a channel reentering a functional state, either due to reconnect or a monitor
 	/// update completion.
 	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
-		channel: &mut Channel<<SP::Target as SignerProvider>::Signer>, raa: Option<msgs::RevokeAndACK>,
+		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
 		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
 		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
 		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
@@ -5429,7 +5431,7 @@ where
 	/// The filter is called for each peer and provided with the number of unfunded, inbound, and
 	/// non-0-conf channels we have with the peer.
 	fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
-	where Filter: Fn(&PeerState<<SP::Target as SignerProvider>::Signer>) -> bool {
+	where Filter: Fn(&PeerState<SP>) -> bool {
 		let mut peers_without_funded_channels = 0;
 		let best_block_height = self.best_block.read().unwrap().height();
 		{
@@ -5447,7 +5449,7 @@ where
 	}
 
 	fn unfunded_channel_count(
-		peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
+		peer: &PeerState<SP>, best_block_height: u32
 	) -> usize {
 		let mut num_unfunded_channels = 0;
 		for (_, chan) in peer.channel_by_id.iter() {
@@ -5893,7 +5895,7 @@ where
 							chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt),
 					Err(e) => PendingHTLCStatus::Fail(e)
 				};
-				let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+				let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
 					// If the update_add is completely bogus, the call will Err and we will close,
 					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
 					// want to reject the new HTLC and fail it backwards instead of forwarding.
@@ -7055,7 +7057,7 @@ where
 	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
 	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
 	/// the function.
-	fn do_chain_event<FN: Fn(&mut Channel<<SP::Target as SignerProvider>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+	fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
 			(&self, height_opt: Option<u32>, f: FN) {
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// during initialization prior to the chain_monitor being fully configured in some cases.
@@ -7546,6 +7548,46 @@ where
 	fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
+		match &msg.data as &str {
+			"cannot co-op close channel w/ active htlcs"|
+			"link failed to shutdown" =>
+			{
+				// LND hasn't properly handled shutdown messages ever, and force-closes any time we
+				// send one while HTLCs are still present. The issue is tracked at
+				// https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
+				// to fix it but none so far have managed to land upstream. The issue appears to be
+				// very low priority for the LND team despite being marked "P1".
+				// We're not going to bother handling this in a sensible way, instead simply
+				// repeating the Shutdown message on repeat until morale improves.
+				if msg.channel_id != [0; 32] {
+					let per_peer_state = self.per_peer_state.read().unwrap();
+					let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+					if peer_state_mutex_opt.is_none() { return; }
+					let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+					if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
+						if let Some(msg) = chan.get_outbound_shutdown() {
+							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+								node_id: *counterparty_node_id,
+								msg,
+							});
+						}
+						peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+							node_id: *counterparty_node_id,
+							action: msgs::ErrorAction::SendWarningMessage {
+								msg: msgs::WarningMessage {
+									channel_id: msg.channel_id,
+									data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+								},
+								log_level: Level::Trace,
+							}
+						});
+					}
+				}
+				return;
+			}
+			_ => {}
+		}
+
 		if msg.channel_id == [0; 32] {
 			let channel_ids: Vec<[u8; 32]> = {
 				let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8561,13 +8603,13 @@ where
 		let channel_count: u64 = Readable::read(reader)?;
 		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+		let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = VecDeque::new();
 		let mut close_background_events = Vec::new();
 		for _ in 0..channel_count {
-			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
+			let mut channel: Channel<SP> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
 			))?;
 			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
@@ -8712,7 +8754,7 @@ where
 		};
 
 		let peer_count: u64 = Readable::read(reader)?;
-		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>)>()));
+		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 		for _ in 0..peer_count {
 			let peer_pubkey = Readable::read(reader)?;
 			let peer_chans = peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
@@ -10534,13 +10576,13 @@ pub mod bench {
 		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>, &'a
 		test_utils::TestLogger>;
 
-	struct ANodeHolder<'a, P: Persist<InMemorySigner>> {
-		node: &'a Manager<'a, P>,
+	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
+		node: &'node_cfg Manager<'chan_mon_cfg, P>,
 	}
-	impl<'a, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'a, P> {
-		type CM = Manager<'a, P>;
+	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'node_cfg, 'chan_mon_cfg, P> {
+		type CM = Manager<'chan_mon_cfg, P>;
 		#[inline]
-		fn node(&self) -> &Manager<'a, P> { self.node }
+		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
 		#[inline]
 		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
 	}