X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=55b2cbae441f5870498ab703787b2772ed9a7310;hb=3fd4b3963c75fb47a2ded83cf133758d32549597;hp=dcc63f22cdce78b53ba11412c571100712bcfa3a;hpb=e594021052e251659e33c0f5e82c7ec2b9e99c18;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index dcc63f22..55b2cbae 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -44,6 +44,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -1805,6 +1806,14 @@ pub struct ChannelDetails {
 ///
 /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
 pub config: Option<ChannelConfig>,
+ /// Pending inbound HTLCs.
+ ///
+ /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+ pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
+ /// Pending outbound HTLCs.
+ ///
+ /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+ pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
 }

 impl ChannelDetails {
@@ -1882,6 +1891,8 @@ impl ChannelDetails {
 inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
 config: Some(context.config()),
 channel_shutdown_state: Some(context.shutdown_state()),
+ pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
+ pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
 }
 }
 }
@@ -2437,14 +2448,14 @@ where

 best_block: RwLock::new(params.best_block),

- outbound_scid_aliases: Mutex::new(HashSet::new()),
- pending_inbound_payments: Mutex::new(HashMap::new()),
+ outbound_scid_aliases: Mutex::new(new_hash_set()),
+ pending_inbound_payments: Mutex::new(new_hash_map()),
 pending_outbound_payments: OutboundPayments::new(),
- forward_htlcs: Mutex::new(HashMap::new()),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
- pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- outpoint_to_peer: Mutex::new(HashMap::new()),
- short_to_chan_info: FairRwLock::new(HashMap::new()),
+ forward_htlcs: Mutex::new(new_hash_map()),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
+ pending_intercepted_htlcs: Mutex::new(new_hash_map()),
+ outpoint_to_peer: Mutex::new(new_hash_map()),
+ short_to_chan_info: FairRwLock::new(new_hash_map()),

 our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
 secp_ctx,
@@ -2456,7 +2467,7 @@ where

 highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),

- per_peer_state: FairRwLock::new(HashMap::new()),
+ per_peer_state: FairRwLock::new(new_hash_map()),

 pending_events: Mutex::new(VecDeque::new()),
 pending_events_processor: AtomicBool::new(false),
@@ -3010,8 +3021,8 @@ where
 /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
 /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
 ///
- /// You can always broadcast the latest local transaction(s) via
- /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ /// You can always broadcast the latest local transaction(s) via
+ /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
 pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
 -> Result<(), APIError> {
 self.force_close_sending_error(channel_id, counterparty_node_id, false)
@@ -3690,7 +3701,7 @@ where
 ProbeSendFailure::RouteNotFound
 })?;

- let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
+ let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());

 let mut res = Vec::new();
@@ -4247,7 +4258,7 @@ where
 let mut failed_forwards = Vec::new();
 let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
 {
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
 mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());

 for (short_chan_id, mut pending_forwards) in forward_htlcs {
@@ -4822,10 +4833,6 @@ where
 // If the feerate has decreased by less than half, don't bother
 if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() &&
 new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
- if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- }
 return NotifyOption::SkipPersistNoEvents;
 }
 if !chan.context.is_live() {
@@ -5956,7 +5963,7 @@ where
 // TODO: Once we can rely on the counterparty_node_id from the
 // monitor event, this and the outpoint_to_peer map should be removed.
 let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- match outpoint_to_peer.get(&funding_txo) {
+ match outpoint_to_peer.get(funding_txo) {
 Some(cp_id) => cp_id.clone(),
 None => return,
 }
@@ -5969,7 +5976,7 @@ where
 peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 let peer_state = &mut *peer_state_lock;
 let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
 chan
 } else {
 let update_actions = peer_state.monitor_update_blocked_actions
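The new `pending_inbound_htlcs`/`pending_outbound_htlcs` fields added to `ChannelDetails` above let callers inspect in-flight HTLCs directly from `list_channels`. A minimal sketch of consuming them; it assumes `channel_manager` is an already-initialized `ChannelManager`, and uses only the field names visible in this diff:

    // Sketch only: `channel_manager` is assumed to be set up elsewhere.
    for details in channel_manager.list_channels() {
        // `pending_inbound_htlcs` / `pending_outbound_htlcs` are plain Vecs,
        // empty for ChannelDetails deserialized from pre-0.0.122 data.
        println!(
            "channel {}: {} inbound / {} outbound HTLCs pending",
            details.channel_id,
            details.pending_inbound_htlcs.len(),
            details.pending_outbound_htlcs.len(),
        );
    }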
@@ -6183,12 +6190,14 @@ where
 fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
 // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
 // likely to be lost on restart!
- if msg.chain_hash != self.chain_hash {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
+ if msg.common_fields.chain_hash != self.chain_hash {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
 }

 if !self.default_configuration.accept_inbound_channels {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
 }

 // Get the number of peers with channels, but without funded ones. We don't care too much
@@ -6201,7 +6210,9 @@ where
 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 .ok_or_else(|| {
 debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+ MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+ msg.common_fields.temporary_channel_id.clone())
 })?;
 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 let peer_state = &mut *peer_state_lock;
@@ -6215,20 +6226,22 @@ where
 {
 return Err(MsgHandleErrInternal::send_err_msg_no_close(
 "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
- msg.temporary_channel_id.clone()));
+ msg.common_fields.temporary_channel_id.clone()));
 }

 let best_block_height = self.best_block.read().unwrap().height();
 if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
 return Err(MsgHandleErrInternal::send_err_msg_no_close(
 format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
- msg.temporary_channel_id.clone()));
+ msg.common_fields.temporary_channel_id.clone()));
 }

- let channel_id = msg.temporary_channel_id;
+ let channel_id = msg.common_fields.temporary_channel_id;
 let channel_exists = peer_state.has_channel(&channel_id);
 if channel_exists {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "temporary_channel_id collision for the same peer!".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
 }

 // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
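Most of the churn in `internal_open_channel` comes from `msgs::OpenChannel` moving the fields it shares with `open_channel_v2` into a `common_fields` sub-struct, while V1-only fields such as `push_msat` stay at the top level. Downstream code that reads these messages needs the same one-line change; a hedged sketch (the helper itself is illustrative and not part of this diff):

    use lightning::ln::msgs::OpenChannel;

    // Illustrative only: shows field access moving from `msg.temporary_channel_id`
    // to `msg.common_fields.temporary_channel_id`, per the hunks above.
    fn log_open_channel(msg: &OpenChannel) {
        println!(
            "open_channel: temp id {}, funding {} sats, push {} msat",
            msg.common_fields.temporary_channel_id, // shared V1/V2 field
            msg.common_fields.funding_satoshis,     // shared V1/V2 field
            msg.push_msat,                          // V1-only, still top-level
        );
    }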
@@ -6236,13 +6249,13 @@ where
 let channel_type = channel::channel_type_from_open_channel(
 &msg, &peer_state.latest_features, &self.channel_type_features()
 ).map_err(|e|
- MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+ MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
 )?;
 let mut pending_events = self.pending_events.lock().unwrap();
 pending_events.push_back((events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
+ temporary_channel_id: msg.common_fields.temporary_channel_id.clone(),
 counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
+ funding_satoshis: msg.common_fields.funding_satoshis,
 push_msat: msg.push_msat,
 channel_type,
 }, None));
@@ -6262,17 +6275,21 @@ where
 &self.default_configuration, best_block_height, &self.logger,
 /*is_0conf=*/false)
 {
 Err(e) => {
- return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
+ return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id));
 },
 Ok(res) => res
 };

 let channel_type = channel.context.get_channel_type();
 if channel_type.requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No zero confirmation channels accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
 }
 if channel_type.requires_anchors_zero_fee_htlc_tx() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No channels with anchor outputs accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
 }

 let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
@@ -6294,11 +6311,11 @@ where
 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 .ok_or_else(|| {
 debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
 })?;
 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
 hash_map::Entry::Occupied(mut phase) => {
 match phase.get_mut() {
 ChannelPhase::UnfundedOutboundV1(chan) => {
@@ -6306,16 +6323,16 @@ where
 (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
 },
 _ => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
 }
 }
 },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
 }
 };
 let mut pending_events = self.pending_events.lock().unwrap();
 pending_events.push_back((events::Event::FundingGenerationReady {
- temporary_channel_id: msg.temporary_channel_id,
+ temporary_channel_id: msg.common_fields.temporary_channel_id,
 counterparty_node_id: *counterparty_node_id,
 channel_value_satoshis: value,
 output_script,
@@ -8702,7 +8719,7 @@ where
 fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
 "Dual-funded channels not supported".to_owned(),
- msg.temporary_channel_id.clone())), *counterparty_node_id);
+ msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
 }

 fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
@@ -8718,7 +8735,7 @@ where
 fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
 "Dual-funded channels not supported".to_owned(),
- msg.temporary_channel_id.clone())), *counterparty_node_id);
+ msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
 }

 fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
@@ -9020,8 +9037,8 @@ where
 return NotifyOption::SkipPersistNoEvents;
 }
 e.insert(Mutex::new(PeerState {
- channel_by_id: HashMap::new(),
- inbound_channel_request_by_id: HashMap::new(),
+ channel_by_id: new_hash_map(),
+ inbound_channel_request_by_id: new_hash_map(),
 latest_features: init_msg.features.clone(),
 pending_msg_events: Vec::new(),
 in_flight_monitor_updates: BTreeMap::new(),
@@ -9480,6 +9497,8 @@ impl Writeable for ChannelDetails {
 (37, user_channel_id_high_opt, option),
 (39, self.feerate_sat_per_1000_weight, option),
 (41, self.channel_shutdown_state, option),
+ (43, self.pending_inbound_htlcs, optional_vec),
+ (45, self.pending_outbound_htlcs, optional_vec),
 });
 Ok(())
 }
@@ -9518,6 +9537,8 @@ impl Readable for ChannelDetails {
 (37, user_channel_id_high_opt, option),
 (39, feerate_sat_per_1000_weight, option),
 (41, channel_shutdown_state, option),
+ (43, pending_inbound_htlcs, optional_vec),
+ (45, pending_outbound_htlcs, optional_vec),
 });

 // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -9554,6 +9575,8 @@ impl Readable for ChannelDetails {
 inbound_htlc_maximum_msat,
 feerate_sat_per_1000_weight,
 channel_shutdown_state,
+ pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
+ pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
 })
 }
 }
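The two new vectors are serialized as odd TLV types (43 and 45): older readers skip unknown odd types, older writers simply omit them, and on read the values default to an empty `Vec` via `unwrap_or(Vec::new())`. A minimal sketch of the same forward-compatibility pattern, assuming LDK's exported `impl_writeable_tlv_based!` macro and its `optional_vec` field format behave as their use in the hunks above suggests (the struct itself is hypothetical):

    use lightning::impl_writeable_tlv_based;

    // Hypothetical record illustrating the even/odd TLV rule: even types are
    // required (unknown even types make old readers fail), odd types are
    // optional (old readers skip them; an absent optional_vec reads as empty).
    pub struct VersionedRecord {
        pub amount_msat: u64,      // type 0: even, required
        pub new_details: Vec<u64>, // type 1: odd, optional; empty when absent
    }

    impl_writeable_tlv_based!(VersionedRecord, {
        (0, amount_msat, required),
        (1, new_details, optional_vec),
    });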
@@ -10088,7 +10111,7 @@ where
 }

 // Encode without retry info for 0.0.101 compatibility.
- let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+ let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
 for (id, outbound) in pending_outbound_payments.iter() {
 match outbound {
 PendingOutboundPayment::Legacy { session_privs } |
@@ -10116,7 +10139,7 @@ where
 for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
 for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
 if !updates.is_empty() {
- if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+ if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
 in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
 }
 }
@@ -10305,7 +10328,9 @@ where
 mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
 Self { entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
- channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
+ channel_monitors: hash_map_from_iter(
+ channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
+ ),
 }
 }
 }
@@ -10352,13 +10377,13 @@ where
 let mut failed_htlcs = Vec::new();

 let channel_count: u64 = Readable::read(reader)?;
- let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 let mut channel_closures = VecDeque::new();
 let mut close_background_events = Vec::new();
- let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
+ let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
 for _ in 0..channel_count {
 let mut channel: Channel<SP> = Channel::read(reader, (
 &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -10444,7 +10469,7 @@ where
 by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
 },
 hash_map::Entry::Vacant(entry) => {
- let mut by_id_map = HashMap::new();
+ let mut by_id_map = new_hash_map();
 by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
 entry.insert(by_id_map);
 }
@@ -10491,7 +10516,7 @@ where
 const MAX_ALLOC_SIZE: usize = 1024 * 64;

 let forward_htlcs_count: u64 = Readable::read(reader)?;
- let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+ let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
 for _ in 0..forward_htlcs_count {
 let short_channel_id = Readable::read(reader)?;
 let pending_forwards_count: u64 = Readable::read(reader)?;
@@ -10517,7 +10542,7 @@ where
 let peer_state_from_chans = |channel_by_id| {
 PeerState {
 channel_by_id,
- inbound_channel_request_by_id: HashMap::new(),
+ inbound_channel_request_by_id: new_hash_map(),
 latest_features: InitFeatures::empty(),
 pending_msg_events: Vec::new(),
 in_flight_monitor_updates: BTreeMap::new(),
@@ -10528,10 +10553,10 @@ where
 };

 let peer_count: u64 = Readable::read(reader)?;
- let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
+ let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 for _ in 0..peer_count {
 let peer_pubkey = Readable::read(reader)?;
- let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+ let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
 let mut peer_state = peer_state_from_chans(peer_chans);
 peer_state.latest_features = Readable::read(reader)?;
 per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -10565,7 +10590,7 @@ where
 let highest_seen_timestamp: u32 = Readable::read(reader)?;

 let pending_inbound_payment_count: u64 = Readable::read(reader)?;
- let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+ let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
 for _ in 0..pending_inbound_payment_count {
 if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
 return Err(DecodeError::InvalidValue);
@@ -10574,11 +10599,11 @@ where
 let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
 let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
- HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+ hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
 for _ in 0..pending_outbound_payments_count_compat {
 let session_priv = Readable::read(reader)?;
 let payment = PendingOutboundPayment::Legacy {
- session_privs: [session_priv].iter().cloned().collect()
+ session_privs: hash_set_from_iter([session_priv]),
 };
 if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
 return Err(DecodeError::InvalidValue)
 }
 }
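Note the pattern throughout the read path above: every length prefix taken from untrusted serialized data is clamped (`cmp::min(count, 128)`, or `MAX_ALLOC_SIZE / size_of::<...>()`) before being used as a map capacity, so a corrupt or hostile serialization cannot force a huge up-front allocation. A standalone sketch of the same idea, with illustrative names not taken from the crate:

    use std::collections::HashMap;
    use std::mem;

    const MAX_ALLOC_SIZE: usize = 1024 * 64;

    // Pre-size a map from an untrusted count without letting that count drive
    // an unbounded allocation; the map still grows on demand if the validated
    // entries really do exceed the clamped capacity.
    fn bounded_map_for<K, V>(untrusted_count: u64) -> HashMap<K, V> {
        let per_entry = mem::size_of::<(K, V)>().max(1); // guard div-by-zero for ZSTs
        HashMap::with_capacity((untrusted_count as usize).min(MAX_ALLOC_SIZE / per_entry))
    }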
@@ -10588,13 +10613,13 @@ where
 // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
 let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
 let mut pending_outbound_payments = None;
- let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
+ let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
 let mut received_network_pubkey: Option<PublicKey> = None;
 let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
 let mut probing_cookie_secret: Option<[u8; 32]> = None;
 let mut claimable_htlc_purposes = None;
 let mut claimable_htlc_onion_fields = None;
- let mut pending_claiming_payments = Some(HashMap::new());
+ let mut pending_claiming_payments = Some(new_hash_map());
 let mut monitor_update_blocked_actions_per_peer: Option<Vec<(PublicKey, BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>)>> = Some(Vec::new());
 let mut events_override = None;
 let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
@@ -10631,7 +10656,7 @@ where
 if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
 pending_outbound_payments = Some(pending_outbound_payments_compat);
 } else if pending_outbound_payments.is_none() {
- let mut outbounds = HashMap::new();
+ let mut outbounds = new_hash_map();
 for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
 outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
 }
@@ -10740,7 +10765,7 @@ where
 // still open, we need to replay any monitor updates that are for closed channels,
 // creating the necessary peer_state entries as we go.
 let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
- Mutex::new(peer_state_from_chans(HashMap::new()))
+ Mutex::new(peer_state_from_chans(new_hash_map()))
 });
 let mut peer_state = peer_state_mutex.lock().unwrap();
 handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
@@ -10801,7 +10826,7 @@ where
 retry_strategy: None,
 attempts: PaymentAttempts::new(),
 payment_params: None,
- session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
+ session_privs: hash_set_from_iter([session_priv_bytes]),
 payment_hash: htlc.payment_hash,
 payment_secret: None, // only used for retries, and we'll never retry on startup
 payment_metadata: None, // only used for retries, and we'll never retry on startup
@@ -10926,7 +10951,7 @@ where
 let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
 let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);

- let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+ let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
 if let Some(purposes) = claimable_htlc_purposes {
 if purposes.len() != claimable_htlcs_list.len() {
 return Err(DecodeError::InvalidValue);
@@ -10999,7 +11024,7 @@ where
 }
 }

- let mut outbound_scid_aliases = HashSet::new();
+ let mut outbound_scid_aliases = new_hash_set();
 for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 let peer_state = &mut *peer_state_lock;
@@ -11104,7 +11129,7 @@ where
 downstream_counterparty_and_funding_outpoint: Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
 } = action {
- if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
 log_trace!(logger,
 "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
 blocked_channel_id);
@@ -12014,14 +12039,15 @@ mod tests {
 check_added_monitors!(nodes[0], 1);
 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
 }
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 }

 // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
+ &nodes[0].keys_manager);
 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);

 // Further, because all of our channels with nodes[0] are inbound, and none of them funded,
 // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
@@ -12069,11 +12095,11 @@ mod tests {
 for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
 nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 }
 nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
 assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);

 // Of course, however, outbound channels are always allowed
 nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
@@ -12109,14 +12135,14 @@ mod tests {
 for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 }

 // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
 // rejected.
 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
 assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);

 // but we can still open an outbound channel.
 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
@@ -12125,7 +12151,7 @@ mod tests {
 // but even with such an outbound channel, additional inbound channels will still fail.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, - open_channel_msg.temporary_channel_id); + open_channel_msg.common_fields.temporary_channel_id); } #[test] @@ -12161,7 +12187,7 @@ mod tests { _ => panic!("Unexpected event"), } get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk); - open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); + open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); } // If we try to accept a channel from another peer non-0conf it will fail. @@ -12183,7 +12209,7 @@ mod tests { _ => panic!("Unexpected event"), } assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id, - open_channel_msg.temporary_channel_id); + open_channel_msg.common_fields.temporary_channel_id); // ...however if we accept the same channel 0conf it should work just fine. nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg); @@ -12328,7 +12354,7 @@ mod tests { nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap(); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx()); + assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx()); nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg); let events = nodes[1].node.get_and_clear_pending_events(); @@ -12343,7 +12369,7 @@ mod tests { nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx()); + assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx()); // Since nodes[1] should not have accepted the channel, it should // not have generated any events. @@ -12532,7 +12558,7 @@ mod tests { let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = HashMap::new(); + let mut forward_htlcs = new_hash_map(); forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
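Finally, the blanket substitution of `HashMap::new()`/`HashSet::new()` with `new_hash_map()`/`new_hash_set()` (and `*_with_capacity`/`*_from_iter` variants) throughout this diff routes all table construction through LDK-internal constructors, so the crate can choose the hasher in one place, e.g. a randomly keyed hasher in `no-std` builds where `std`'s seeded SipHash is unavailable. A plausible minimal shape for such wrappers under `std`; this is a sketch of the idea, not the crate's actual `util` code:

    use std::collections::{HashMap, HashSet};

    // Centralized constructors: swapping the hasher here changes every call
    // site at once, without touching the rest of the crate.
    pub fn new_hash_map<K, V>() -> HashMap<K, V> { HashMap::new() }
    pub fn new_hash_set<K>() -> HashSet<K> { HashSet::new() }
    pub fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
        HashMap::with_capacity(cap)
    }
    pub fn hash_map_from_iter<K, V, I>(iter: I) -> HashMap<K, V>
    where K: core::hash::Hash + Eq, I: IntoIterator<Item = (K, V)> {
        HashMap::from_iter(iter)
    }
    pub fn hash_set_from_iter<K, I>(iter: I) -> HashSet<K>
    where K: core::hash::Hash + Eq, I: IntoIterator<Item = K> {
        HashSet::from_iter(iter)
    }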