X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=10516d4d0a42ef392d84fbbb3d84ca7e77062703;hb=36429e8245959c6020fbd24ce11fd98d83b5c5e8;hp=b5fc9996e294ac2ffb4cd6d6e964e64c856439f6;hpb=7f177bb6dd6fde177176b7be1ec62ef533038dd3;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b5fc9996..10516d4d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -44,6 +44,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa // construct one themselves. use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; +pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails}; use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::Bolt11InvoiceFeatures; @@ -903,7 +904,9 @@ impl PeerState where SP::Target: SignerProvider { if require_disconnected && self.is_connected { return false } - self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0 + !self.channel_by_id.iter().any(|(_, phase)| + matches!(phase, ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_)) + ) && self.monitor_update_blocked_actions.is_empty() && self.in_flight_monitor_updates.is_empty() } @@ -975,6 +978,7 @@ pub type SimpleArcChannelManager = ChannelManager< Arc>>, Arc, + Arc, Arc>>, Arc>>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer>>, Arc>, @@ -1005,6 +1009,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = &'e DefaultRouter< &'f NetworkGraph<&'g L>, &'g L, + &'c KeysManager, &'h RwLock, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L> @@ -1636,9 +1641,6 @@ pub struct ChannelDetails { pub counterparty: ChannelCounterparty, /// The Channel's funding transaction output, if we've negotiated the funding transaction with /// our counterparty already. - /// - /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to - /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`. pub funding_txo: Option, /// The features which this channel operates with. See individual features for more info. /// @@ -1804,6 +1806,14 @@ pub struct ChannelDetails { /// /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109. pub config: Option, + /// Pending inbound HTLCs. + /// + /// This field is empty for objects serialized with LDK versions prior to 0.0.122. + pub pending_inbound_htlcs: Vec, + /// Pending outbound HTLCs. + /// + /// This field is empty for objects serialized with LDK versions prior to 0.0.122. 
+ pub pending_outbound_htlcs: Vec, } impl ChannelDetails { @@ -1881,6 +1891,8 @@ impl ChannelDetails { inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(), config: Some(context.config()), channel_shutdown_state: Some(context.shutdown_state()), + pending_inbound_htlcs: context.get_pending_inbound_htlc_details(), + pending_outbound_htlcs: context.get_pending_outbound_htlc_details(), } } } @@ -2155,6 +2167,7 @@ macro_rules! emit_channel_pending_event { counterparty_node_id: $channel.context.get_counterparty_node_id(), user_channel_id: $channel.context.get_user_id(), funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(), + channel_type: Some($channel.context.get_channel_type().clone()), }, None)); $channel.context.set_channel_pending_event_emitted(); } @@ -2296,7 +2309,7 @@ macro_rules! handle_new_monitor_update { handle_new_monitor_update!($self, $update_res, $chan, _internal, handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan)) }; - ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { + ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo) .or_insert_with(Vec::new); // During startup, we push monitor updates as background events through to here in @@ -2435,14 +2448,14 @@ where best_block: RwLock::new(params.best_block), - outbound_scid_aliases: Mutex::new(HashSet::new()), - pending_inbound_payments: Mutex::new(HashMap::new()), + outbound_scid_aliases: Mutex::new(new_hash_set()), + pending_inbound_payments: Mutex::new(new_hash_map()), pending_outbound_payments: OutboundPayments::new(), - forward_htlcs: Mutex::new(HashMap::new()), - claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }), - pending_intercepted_htlcs: Mutex::new(HashMap::new()), - outpoint_to_peer: Mutex::new(HashMap::new()), - short_to_chan_info: FairRwLock::new(HashMap::new()), + forward_htlcs: Mutex::new(new_hash_map()), + claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), + outpoint_to_peer: Mutex::new(new_hash_map()), + short_to_chan_info: FairRwLock::new(new_hash_map()), our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(), secp_ctx, @@ -2454,7 +2467,7 @@ where highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize), - per_peer_state: FairRwLock::new(HashMap::new()), + per_peer_state: FairRwLock::new(new_hash_map()), pending_events: Mutex::new(VecDeque::new()), pending_events_processor: AtomicBool::new(false), @@ -2753,7 +2766,7 @@ where // Update the monitor with the shutdown script if necessary. 
if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } } else { @@ -3414,7 +3427,7 @@ where }, onion_packet, None, &self.fee_estimator, &&logger); match break_chan_phase_entry!(self, send_res, chan_phase_entry) { Some(monitor_update) => { - match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { + match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { false => { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor @@ -3688,7 +3701,7 @@ where ProbeSendFailure::RouteNotFound })?; - let mut used_liquidity_map = HashMap::with_capacity(first_hops.len()); + let mut used_liquidity_map = hash_map_with_capacity(first_hops.len()); let mut res = Vec::new(); @@ -4245,7 +4258,7 @@ where let mut failed_forwards = Vec::new(); let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { - let mut forward_htlcs = HashMap::new(); + let mut forward_htlcs = new_hash_map(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); for (short_chan_id, mut pending_forwards) in forward_htlcs { @@ -4770,7 +4783,7 @@ where hash_map::Entry::Occupied(mut chan_phase) => { if let ChannelPhase::Funded(chan) = chan_phase.get_mut() { updated_chan = true; - handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(), + handle_new_monitor_update!(self, funding_txo, update.clone(), peer_state_lock, peer_state, per_peer_state, chan); } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -5574,7 +5587,7 @@ where peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } if !during_init { - handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock, + handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } else { // If we're running during init we cannot update a monitor directly - @@ -5690,9 +5703,9 @@ where } fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, - forwarded_htlc_value_msat: Option, from_onchain: bool, startup_replay: bool, - next_channel_counterparty_node_id: Option, next_channel_outpoint: OutPoint, - next_channel_id: ChannelId, + forwarded_htlc_value_msat: Option, skimmed_fee_msat: Option, from_onchain: bool, + startup_replay: bool, next_channel_counterparty_node_id: Option, + next_channel_outpoint: OutPoint, next_channel_id: ChannelId, ) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. 
} => { @@ -5788,18 +5801,21 @@ where }) } else { None } } else { - let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { if let Some(claimed_htlc_value) = htlc_claim_value_msat { Some(claimed_htlc_value - forwarded_htlc_value) } else { None } } else { None }; + debug_assert!(skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat"); Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { event: events::Event::PaymentForwarded { - fee_earned_msat, + total_fee_earned_msat, claim_from_onchain_tx: from_onchain, prev_channel_id: Some(prev_channel_id), next_channel_id: Some(next_channel_id), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + skimmed_fee_msat, }, downstream_counterparty_and_funding_outpoint: chan_to_release, }) @@ -5951,7 +5967,7 @@ where // TODO: Once we can rely on the counterparty_node_id from the // monitor event, this and the outpoint_to_peer map should be removed. let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap(); - match outpoint_to_peer.get(&funding_txo) { + match outpoint_to_peer.get(funding_txo) { Some(cp_id) => cp_id.clone(), None => return, } @@ -5964,7 +5980,7 @@ where peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; let channel = - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) { chan } else { let update_actions = peer_state.monitor_update_blocked_actions @@ -6570,7 +6586,7 @@ where } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } }, @@ -6739,7 +6755,7 @@ where fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { let funding_txo; - let (htlc_source, forwarded_htlc_value) = { + let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -6777,8 +6793,11 @@ where hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), - false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), + Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id), + funding_txo, msg.channel_id + ); + Ok(()) } @@ -6852,7 +6871,7 @@ where let funding_txo = chan.context.get_funding_txo(); let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, + handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } Ok(()) @@ -7031,7 +7050,7 @@ where if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update, + handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } htlcs_to_fail @@ -7276,7 +7295,9 @@ where let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id)); if let Some(preimage) = htlc_update.payment_preimage { log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage); - self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id); + self.claim_funds_internal(htlc_update.source, preimage, + htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true, + false, counterparty_node_id, funding_outpoint, channel_id); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id }; @@ -7372,7 +7393,7 @@ where if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, + handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); continue 'peer_loop; } @@ -7963,7 +7984,6 @@ where /// Errors if the `MessageRouter` errors or returns an empty `Vec`. 
fn create_blinded_path(&self) -> Result { let recipient = self.get_our_node_id(); - let entropy_source = self.entropy_source.deref(); let secp_ctx = &self.secp_ctx; let peers = self.per_peer_state.read().unwrap() @@ -7973,7 +7993,7 @@ where .collect::>(); self.router - .create_blinded_paths(recipient, peers, entropy_source, secp_ctx) + .create_blinded_paths(recipient, peers, secp_ctx) .and_then(|paths| paths.into_iter().next().ok_or(())) } @@ -7982,7 +8002,6 @@ where fn create_blinded_payment_paths( &self, amount_msats: u64, payment_secret: PaymentSecret ) -> Result, ()> { - let entropy_source = self.entropy_source.deref(); let secp_ctx = &self.secp_ctx; let first_hops = self.list_usable_channels(); @@ -7997,7 +8016,7 @@ where }, }; self.router.create_blinded_payment_paths( - payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx + payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx ) } @@ -8141,7 +8160,7 @@ where if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", channel_id); - handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update, + handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, peer_state_lck, peer_state, per_peer_state, chan); if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the @@ -8905,10 +8924,12 @@ where } &mut chan.context }, - // Unfunded channels will always be removed. - ChannelPhase::UnfundedOutboundV1(chan) => { - &mut chan.context + // We retain UnfundedOutboundV1 channel for some time in case + // peer unexpectedly disconnects, and intends to reconnect again. + ChannelPhase::UnfundedOutboundV1(_) => { + return true; }, + // Unfunded inbound channels will always be removed. 
ChannelPhase::UnfundedInboundV1(chan) => { &mut chan.context }, @@ -9010,8 +9031,8 @@ where return NotifyOption::SkipPersistNoEvents; } e.insert(Mutex::new(PeerState { - channel_by_id: HashMap::new(), - inbound_channel_request_by_id: HashMap::new(), + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -9047,15 +9068,31 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)| - if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None } - ).for_each(|chan| { - let logger = WithChannelContext::from(&self.logger, &chan.context); - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.context.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&&logger), - }); - }); + for (_, phase) in peer_state.channel_by_id.iter_mut() { + match phase { + ChannelPhase::Funded(chan) => { + let logger = WithChannelContext::from(&self.logger, &chan.context); + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_channel_reestablish(&&logger), + }); + } + + ChannelPhase::UnfundedOutboundV1(chan) => { + pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + node_id: chan.context.get_counterparty_node_id(), + msg: chan.get_open_channel(self.chain_hash), + }); + } + + ChannelPhase::UnfundedInboundV1(_) => { + // Since unfunded inbound channel maps are cleared upon disconnecting a peer, + // they are not persisted and won't be recovered after a crash. + // Therefore, they shouldn't exist at this point. + debug_assert!(false); + } + } + } } return NotifyOption::SkipPersistHandleEvents; @@ -9454,6 +9491,8 @@ impl Writeable for ChannelDetails { (37, user_channel_id_high_opt, option), (39, self.feerate_sat_per_1000_weight, option), (41, self.channel_shutdown_state, option), + (43, self.pending_inbound_htlcs, optional_vec), + (45, self.pending_outbound_htlcs, optional_vec), }); Ok(()) } @@ -9492,6 +9531,8 @@ impl Readable for ChannelDetails { (37, user_channel_id_high_opt, option), (39, feerate_sat_per_1000_weight, option), (41, channel_shutdown_state, option), + (43, pending_inbound_htlcs, optional_vec), + (45, pending_outbound_htlcs, optional_vec), }); // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with @@ -9528,6 +9569,8 @@ impl Readable for ChannelDetails { inbound_htlc_maximum_msat, feerate_sat_per_1000_weight, channel_shutdown_state, + pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()), + pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()), }) } } @@ -9816,7 +9859,7 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, { (2, prev_short_channel_id, required), (4, prev_htlc_id, required), (6, prev_funding_outpoint, required), - // Note that by the time we get past the required read for type 2 above, prev_funding_outpoint will be + // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be // filled in, so we can safely unwrap it here. (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))), }); @@ -10062,7 +10105,7 @@ where } // Encode without retry info for 0.0.101 compatibility. 
- let mut pending_outbound_payments_no_retry: HashMap> = HashMap::new(); + let mut pending_outbound_payments_no_retry: HashMap> = new_hash_map(); for (id, outbound) in pending_outbound_payments.iter() { match outbound { PendingOutboundPayment::Legacy { session_privs } | @@ -10090,7 +10133,7 @@ where for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) { for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() { if !updates.is_empty() { - if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); } + if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); } in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates); } } @@ -10279,7 +10322,9 @@ where mut channel_monitors: Vec<&'a mut ChannelMonitor<::EcdsaSigner>>) -> Self { Self { entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config, - channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect() + channel_monitors: hash_map_from_iter( + channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }) + ), } } } @@ -10326,13 +10371,13 @@ where let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; - let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); - let mut funded_peer_channels: HashMap>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); - let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); - let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); + let mut funded_peer_channels: HashMap>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize); + let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) @@ -10418,7 +10463,7 @@ where by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); }, hash_map::Entry::Vacant(entry) => { - let mut by_id_map = HashMap::new(); + let mut by_id_map = new_hash_map(); by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel)); entry.insert(by_id_map); } @@ -10465,7 +10510,7 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; @@ -10491,7 +10536,7 @@ where let peer_state_from_chans = |channel_by_id| { PeerState { channel_by_id, - inbound_channel_request_by_id: 
HashMap::new(), + inbound_channel_request_by_id: new_hash_map(), latest_features: InitFeatures::empty(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -10502,10 +10547,10 @@ where }; let peer_count: u64 = Readable::read(reader)?; - let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); + let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); for _ in 0..peer_count { let peer_pubkey = Readable::read(reader)?; - let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()); + let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map()); let mut peer_state = peer_state_from_chans(peer_chans); peer_state.latest_features = Readable::read(reader)?; per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); @@ -10539,7 +10584,7 @@ where let highest_seen_timestamp: u32 = Readable::read(reader)?; let pending_inbound_payment_count: u64 = Readable::read(reader)?; - let mut pending_inbound_payments: HashMap = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32))); + let mut pending_inbound_payments: HashMap = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32))); for _ in 0..pending_inbound_payment_count { if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() { return Err(DecodeError::InvalidValue); @@ -10548,11 +10593,11 @@ where let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; let mut pending_outbound_payments_compat: HashMap = - HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; let payment = PendingOutboundPayment::Legacy { - session_privs: [session_priv].iter().cloned().collect() + session_privs: hash_set_from_iter([session_priv]), }; if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { return Err(DecodeError::InvalidValue) @@ -10562,13 +10607,13 @@ where // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
let mut pending_outbound_payments_no_retry: Option>> = None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = Some(HashMap::new()); + let mut pending_intercepted_htlcs: Option> = Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; let mut claimable_htlc_onion_fields = None; - let mut pending_claiming_payments = Some(HashMap::new()); + let mut pending_claiming_payments = Some(new_hash_map()); let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); let mut events_override = None; let mut in_flight_monitor_updates: Option>> = None; @@ -10605,7 +10650,7 @@ where if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { pending_outbound_payments = Some(pending_outbound_payments_compat); } else if pending_outbound_payments.is_none() { - let mut outbounds = HashMap::new(); + let mut outbounds = new_hash_map(); for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); } @@ -10714,7 +10759,7 @@ where // still open, we need to replay any monitor updates that are for closed channels, // creating the neccessary peer_state entries as we go. let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| { - Mutex::new(peer_state_from_chans(HashMap::new())) + Mutex::new(peer_state_from_chans(new_hash_map())) }); let mut peer_state = peer_state_mutex.lock().unwrap(); handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, @@ -10775,7 +10820,7 @@ where retry_strategy: None, attempts: PaymentAttempts::new(), payment_params: None, - session_privs: [session_priv_bytes].iter().map(|a| *a).collect(), + session_privs: hash_set_from_iter([session_priv_bytes]), payment_hash: htlc.payment_hash, payment_secret: None, // only used for retries, and we'll never retry on startup payment_metadata: None, // only used for retries, and we'll never retry on startup @@ -10900,7 +10945,7 @@ where let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material(); let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material); - let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len()); + let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); if let Some(purposes) = claimable_htlc_purposes { if purposes.len() != claimable_htlcs_list.len() { return Err(DecodeError::InvalidValue); @@ -10973,7 +11018,7 @@ where } } - let mut outbound_scid_aliases = HashSet::new(); + let mut outbound_scid_aliases = new_hash_set(); for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11078,13 +11123,12 @@ where downstream_counterparty_and_funding_outpoint: Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), .. 
} = action { - if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) { - let channel_id = blocked_channel_id; + if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) { log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", - channel_id); + blocked_channel_id); blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates - .entry(*channel_id) + .entry(*blocked_channel_id) .or_insert_with(Vec::new).push(blocking_action.clone()); } else { // If the channel we were blocking has closed, we don't need to @@ -11168,7 +11212,7 @@ where // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. - channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), + channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None, downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id); } @@ -11856,8 +11900,8 @@ mod tests { } let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000); } fn check_not_connected_to_peer_error(res_err: Result, expected_public_key: PublicKey) { @@ -12507,7 +12551,7 @@ mod tests { let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = HashMap::new(); + let mut forward_htlcs = new_hash_map(); forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
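
A minimal usage sketch of the `pending_inbound_htlcs`/`pending_outbound_htlcs` fields this diff adds to `ChannelDetails`, assuming the `lightning` crate as of this change (0.0.122). Only `ChannelDetails::channel_id` and the lengths of the two new `Vec` fields are relied on; the helper name `pending_htlc_counts` is illustrative, not part of the change itself.

// Sketch only: summarizes pending-HTLC counts per channel, e.g. for the output
// of `ChannelManager::list_channels()`. Field names beyond those visible in the
// diff above are deliberately not touched.
use lightning::ln::ChannelId;
use lightning::ln::channelmanager::ChannelDetails;

/// Returns (channel_id, pending inbound HTLC count, pending outbound HTLC count)
/// for each channel in `channels`.
fn pending_htlc_counts(channels: &[ChannelDetails]) -> Vec<(ChannelId, usize, usize)> {
    channels
        .iter()
        .map(|chan| {
            (
                chan.channel_id,
                chan.pending_inbound_htlcs.len(),
                chan.pending_outbound_htlcs.len(),
            )
        })
        .collect()
}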