X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=04a455233d993cb2ff6d48bef446ba7c2876e15c;hb=cb1d795559611929264a1f5fa16a76283df5f2b4;hp=d206dee4950f3f9908e0cbdd8af6755d485de2a2;hpb=0e0aabea07afcdae2008a653bb9a41f340d571fa;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d206dee4..04a45523 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -441,6 +441,7 @@ struct ClaimableHTLC {
 	cltv_expiry: u32,
 	value: u64,
 	onion_payload: OnionPayload,
+	timer_ticks: u8,
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -660,8 +661,16 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
 	pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
+	/// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added
+	/// here once the channel is available for normal use, with SCIDs being added once the funding
+	/// transaction is confirmed at the channel's required confirmation depth.
 	pub(super) short_to_id: HashMap<u64, [u8; 32]>,
-	/// short channel id -> forward infos. Key of 0 means payments received
+	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
+	///
+	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+	/// and via the classic SCID.
+	///
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the existence of a channel with the short id here, nor the short
 	/// ids in the PendingHTLCInfo!
@@ -878,6 +887,8 @@ impl PendingOutboundPayment {
 /// issues such as overly long function definitions. Note that the ChannelManager can take any
 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
 /// concrete type of the KeysManager.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
 
 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
 ///
@@ -888,6 +899,8 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, A
+///
+/// (C-not exported) as general type aliases don't make sense in bindings
 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
@@ -971,6 +984,12 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
+	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
+	/// and some closed channels which reached a usable state prior to being closed. This is used
+	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
+	/// active channel list on load.
+	outbound_scid_aliases: Mutex<HashSet<u64>>,
+
 	our_network_key: SecretKey,
 	our_network_pubkey: PublicKey,
@@ -1134,6 +1153,9 @@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_G
 /// pending HTLCs in flight.
 pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
 
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
+pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+
 /// Information needed for constructing an invoice route hint for this channel.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
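A note on the new constant above: `timer_tick_occurred` is expected to be driven roughly once per minute (as the background processor does), so three ticks give the remaining parts of a multi-path payment on the order of a few minutes to arrive before the received parts are failed back. A minimal sketch of that arithmetic, assuming a once-per-minute driver (the interval is the caller's choice, not fixed by this diff):

    // Sketch only; the 60-second interval is an assumption about how often the
    // caller drives ChannelManager::timer_tick_occurred.
    const ASSUMED_TICK_INTERVAL_SECS: u64 = 60;
    const MPP_TIMEOUT_TICKS: u8 = 3; // mirrors the constant added above

    fn main() {
        let approx_timeout_secs = ASSUMED_TICK_INTERVAL_SECS * MPP_TIMEOUT_TICKS as u64;
        assert_eq!(approx_timeout_secs, 180);
    }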
@@ -1188,7 +1210,20 @@ pub struct ChannelDetails {
 	pub funding_txo: Option<OutPoint>,
 	/// The position of the funding transaction in the chain. None if the funding transaction has
 	/// not yet been confirmed and the channel fully opened.
+	///
+	/// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
+	/// payments instead of this. See [`get_inbound_payment_scid`].
+	///
+	/// [`inbound_scid_alias`]: Self::inbound_scid_alias
+	/// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
 	pub short_channel_id: Option<u64>,
+	/// An optional [`short_channel_id`] alias for this channel, randomly generated by our
+	/// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
+	/// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
+	/// when they see a payment to be routed to us.
+	///
+	/// [`short_channel_id`]: Self::short_channel_id
+	pub inbound_scid_alias: Option<u64>,
 	/// The value, in satoshis, of this channel as appears in the funding output
 	pub channel_value_satoshis: u64,
 	/// The value, in satoshis, that must always be held in the channel for us. This value ensures
@@ -1274,6 +1309,15 @@ pub struct ChannelDetails {
 	pub is_public: bool,
 }
 
+impl ChannelDetails {
+	/// Gets the SCID which should be used to identify this channel for inbound payments. This
+	/// should be used for providing invoice hints or in any other context where our counterparty
+	/// will forward a payment to us.
+	pub fn get_inbound_payment_scid(&self) -> Option<u64> {
+		self.inbound_scid_alias.or(self.short_channel_id)
+	}
+}
+
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
 /// Err() type describing which state the payment is in, see the description of individual enum
 /// states for more.
@@ -1383,6 +1427,24 @@ macro_rules! handle_error {
 	}
 }
 
+macro_rules! update_maps_on_chan_removal {
+	($self: expr, $short_to_id: expr, $channel: expr) => {
+		if let Some(short_id) = $channel.get_short_channel_id() {
+			$short_to_id.remove(&short_id);
+		} else {
+			// If the channel was never confirmed on-chain prior to its closure, remove the
+			// outbound SCID alias we used for it from the collision-prevention set. While we
+			// generally want to avoid ever re-using an outbound SCID alias across all channels, we
+			// also don't want a counterparty to be able to trivially cause a memory leak by simply
+			// opening a million channels with us which are closed before we ever reach the funding
+			// stage.
+			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
+			debug_assert!(alias_removed);
+		}
+		$short_to_id.remove(&$channel.outbound_scid_alias());
+	}
+}
+
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
 macro_rules! convert_chan_err {
 	($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
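The `ChannelDetails::get_inbound_payment_scid` helper added above encodes the rule that invoice route hints should carry the counterparty-provided alias when one exists, only falling back to the confirmed SCID. A standalone sketch of that selection rule, using a hypothetical free function rather than the LDK method itself:

    // Hypothetical helper mirroring ChannelDetails::get_inbound_payment_scid above.
    fn inbound_payment_scid(inbound_scid_alias: Option<u64>, short_channel_id: Option<u64>) -> Option<u64> {
        inbound_scid_alias.or(short_channel_id)
    }

    fn main() {
        // Unconfirmed (or private) channel: only the alias exists, and the hint must use it.
        assert_eq!(inbound_payment_scid(Some(0x2a2a_2a2a), None), Some(0x2a2a_2a2a));
        // Confirmed channel whose counterparty also provided an alias: still prefer the alias.
        assert_eq!(inbound_payment_scid(Some(0x2a2a_2a2a), Some(731_000 << 40)), Some(0x2a2a_2a2a));
        // No alias negotiated: fall back to the real SCID.
        assert_eq!(inbound_payment_scid(None, Some(731_000 << 40)), Some(731_000 << 40));
    }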
@@ -1395,18 +1457,14 @@ macro_rules! convert_chan_err {
 			},
 			ChannelError::Close(msg) => {
 				log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-				if let Some(short_id) = $channel.get_short_channel_id() {
-					$short_to_id.remove(&short_id);
-				}
+				update_maps_on_chan_removal!($self, $short_to_id, $channel);
 				let shutdown_res = $channel.force_shutdown(true);
 				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
 			},
 			ChannelError::CloseDelayBroadcast(msg) => {
 				log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
-				if let Some(short_id) = $channel.get_short_channel_id() {
-					$short_to_id.remove(&short_id);
-				}
+				update_maps_on_chan_removal!($self, $short_to_id, $channel);
 				let shutdown_res = $channel.force_shutdown(false);
 				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
@@ -1446,28 +1504,21 @@ macro_rules! try_chan_entry {
 }
 
 macro_rules! remove_channel {
-	($channel_state: expr, $entry: expr) => {
+	($self: expr, $channel_state: expr, $entry: expr) => {
 		{
 			let channel = $entry.remove_entry().1;
-			if let Some(short_id) = channel.get_short_channel_id() {
-				$channel_state.short_to_id.remove(&short_id);
-			}
+			update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel);
 			channel
 		}
 	}
 }
 
 macro_rules! handle_monitor_err {
-	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
-	};
 	($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
 		match $err {
 			ChannelMonitorUpdateErr::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
-				if let Some(short_id) = $chan.get_short_channel_id() {
-					$short_to_id.remove(&short_id);
-				}
+				update_maps_on_chan_removal!($self, $short_to_id, $chan);
 				// TODO: $failed_fails is dropped here, which will cause other channels to hit the
 				// chain in a confused state! We need to move them into the ChannelMonitor which
 				// will be responsible for failing backwards once things confirm on-chain.
@@ -1513,9 +1564,19 @@ macro_rules! handle_monitor_err {
 			}
 			res
 		} };
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
+		debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+	} };
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+	};
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new(), Vec::new())
+	};
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
-	}
+	};
 }
 
 macro_rules! return_monitor_err {
@@ -1539,15 +1600,34 @@ macro_rules! maybe_break_monitor_err {
 	}
 }
 
+macro_rules! send_funding_locked {
+	($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $funding_locked_msg: expr) => {
+		$pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+			node_id: $channel.get_counterparty_node_id(),
+			msg: $funding_locked_msg,
+		});
+		// Note that we may send a funding locked multiple times for a channel if we reconnect, so
+		// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
+		let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
+		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
+			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
+		if let Some(real_scid) = $channel.get_short_channel_id() {
+			let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id());
+			assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(),
+				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
+		}
+	}
+}
+
 macro_rules! handle_chan_restoration_locked {
 	($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
	 $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
	 $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr, $announcement_sigs: expr) => { {
 		let mut htlc_forwards = None;
-		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
 
 		let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
 		let chanmon_update_is_none = chanmon_update.is_none();
+		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
 		let res = loop {
 			let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
 			if !forwards.is_empty() {
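The `send_funding_locked!` macro above is where a channel gains its `short_to_id` entries: the outbound alias is inserted as soon as the channel is usable, and the real SCID joins it once the funding transaction confirms, so either key resolves to the same channel. A small std-only sketch of that invariant, with illustrative values rather than LDK types:

    use std::collections::HashMap;

    fn main() {
        let channel_id = [0x11u8; 32];
        let outbound_scid_alias: u64 = 0x1d00_c0ffee;             // hypothetical alias we handed our peer
        let real_scid: u64 = (700_123u64 << 40) | (5 << 16) | 1;  // hypothetical confirmed SCID

        let mut short_to_id: HashMap<u64, [u8; 32]> = HashMap::new();
        // On (re-)sending funding_locked: alias first, real SCID once confirmed.
        assert!(short_to_id.insert(outbound_scid_alias, channel_id).is_none());
        assert!(short_to_id.insert(real_scid, channel_id).is_none());

        // Two keys, one channel; re-inserting on reconnect must keep pointing at the same channel.
        assert_eq!(short_to_id.get(&outbound_scid_alias), short_to_id.get(&real_scid));
        assert_eq!(short_to_id.insert(outbound_scid_alias, channel_id), Some(channel_id));
    }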
@@ -1573,11 +1653,7 @@ macro_rules! handle_chan_restoration_locked {
 				// Similar to the above, this implies that we're letting the funding_locked fly
 				// before it should be allowed to.
 				assert!(chanmon_update.is_none());
-				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-					node_id: counterparty_node_id,
-					msg,
-				});
-				$channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+				send_funding_locked!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
 			}
 			if let Some(msg) = $announcement_sigs {
 				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -1706,6 +1782,7 @@ impl ChannelMana
 				claimable_htlcs: HashMap::new(),
 				pending_msg_events: Vec::new(),
 			}),
+			outbound_scid_aliases: Mutex::new(HashSet::new()),
 			pending_inbound_payments: Mutex::new(HashMap::new()),
 			pending_outbound_payments: Mutex::new(HashMap::new()),
@@ -1737,6 +1814,25 @@ impl ChannelMana
 		&self.default_configuration
 	}
 
+	fn create_and_insert_outbound_scid_alias(&self) -> u64 {
+		let height = self.best_block.read().unwrap().height();
+		let mut outbound_scid_alias = 0;
+		let mut i = 0;
+		loop {
+			if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
+				outbound_scid_alias += 1;
+			} else {
+				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+			}
+			if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
+				break;
+			}
+			i += 1;
+			if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels"); }
+		}
+		outbound_scid_alias
+	}
+
 	/// Creates a new outbound channel to the given remote node and with the given value.
 	///
 	/// `user_channel_id` will be provided back as in
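The helper above reserves a freshly generated alias in the shared `outbound_scid_aliases` set before the channel exists, and the open/accept paths further down remove it again if channel creation fails, so aborted opens cannot leak reservations. A generic sketch of that reserve-then-roll-back pattern, using only std types and hypothetical names:

    use std::collections::HashSet;
    use std::sync::Mutex;

    // Hypothetical stand-ins for the alias generator and a fallible channel constructor.
    fn generate_candidate(attempt: u64) -> u64 { 0x5c1d_0000 + attempt }
    fn build_channel(fail: bool) -> Result<&'static str, &'static str> {
        if fail { Err("handshake parameters rejected") } else { Ok("channel") }
    }

    fn reserve_alias(reserved: &Mutex<HashSet<u64>>) -> u64 {
        let mut attempt = 0;
        loop {
            let candidate = generate_candidate(attempt);
            // Zero is never a valid alias; insert() returning true means it was unused.
            if candidate != 0 && reserved.lock().unwrap().insert(candidate) {
                return candidate;
            }
            attempt += 1;
        }
    }

    fn main() {
        let reserved = Mutex::new(HashSet::new());
        let alias = reserve_alias(&reserved);
        if build_channel(true).is_err() {
            // Mirror of the error paths below: give the reservation back on failure.
            assert!(reserved.lock().unwrap().remove(&alias));
        }
        assert!(reserved.lock().unwrap().is_empty());
    }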
@@ -1772,11 +1868,20 @@ impl ChannelMana
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		match per_peer_state.get(&their_network_key) {
 			Some(peer_state) => {
+				let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 				let peer_state = peer_state.lock().unwrap();
 				let their_features = &peer_state.latest_features;
 				let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-				Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features,
-					channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height())?
+				match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
					their_features, channel_value_satoshis, push_msat, user_channel_id, config,
					self.best_block.read().unwrap().height(), outbound_scid_alias)
+				{
+					Ok(res) => res,
+					Err(e) => {
+						self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+						return Err(e);
+					},
+				}
 			},
 			None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
 		}
@@ -1826,6 +1931,7 @@ impl ChannelMana
 				},
 				funding_txo: channel.get_funding_txo(),
 				short_channel_id: channel.get_short_channel_id(),
+				inbound_scid_alias: channel.latest_inbound_scid_alias(),
 				channel_value_satoshis: channel.get_value_satoshis(),
 				unspendable_punishment_reserve: to_self_reserve_satoshis,
 				balance_msat,
@@ -1911,9 +2017,9 @@ impl ChannelMana
 					if let Some(monitor_update) = monitor_update {
 						if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
 							let (result, is_permanent) =
-								handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key());
+								handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
 							if is_permanent {
-								remove_channel!(channel_state, chan_entry);
+								remove_channel!(self, channel_state, chan_entry);
 								break result;
 							}
 						}
@@ -1925,7 +2031,7 @@ impl ChannelMana
 					});
 
 					if chan_entry.get().is_shutdown() {
-						let channel = remove_channel!(channel_state, chan_entry);
+						let channel = remove_channel!(self, channel_state, chan_entry);
 						if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
 							channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 								msg: channel_update
@@ -2019,9 +2125,6 @@ impl ChannelMana
 						return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
 					}
 				}
-				if let Some(short_id) = chan.get().get_short_channel_id() {
-					channel_state.short_to_id.remove(&short_id);
-				}
 				if peer_node_id.is_some() {
 					if let Some(peer_msg) = peer_msg {
 						self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
 				} else {
 					self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
 				}
-				chan.remove_entry().1
+				remove_channel!(self, channel_state, chan)
 			} else {
 				return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
 			}
@@ -3176,12 +3279,9 @@ impl ChannelMana
 					}
 					ChannelError::Close(msg) => {
 						log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
-						let (channel_id, mut channel) = chan.remove_entry();
-						if let Some(short_id) = channel.get_short_channel_id() {
-							channel_state.short_to_id.remove(&short_id);
-						}
+						let mut channel = remove_channel!(self, channel_state, chan);
 						// ChannelClosed event is generated by handle_error for us.
-						Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
+						Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
 					},
 					ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 				};
@@ -3234,6 +3334,7 @@ impl ChannelMana
 						phantom_shared_secret,
 					},
 					value: amt_to_forward,
+					timer_ticks: 0,
 					cltv_expiry,
 					onion_payload,
 				};
@@ -3455,7 +3556,7 @@ impl ChannelMana
 			let ret_err = match res {
 				Ok(Some((update_fee, commitment_signed, monitor_update))) => {
 					if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-						let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), chan_id);
+						let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
 						if drop { retain_channel = false; }
 						res
 					} else {
@@ -3528,6 +3629,7 @@ impl ChannelMana
 		let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 
 		let mut handle_errors = Vec::new();
+		let mut timed_out_mpp_htlcs = Vec::new();
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
@@ -3576,6 +3678,32 @@ impl ChannelMana
 				true
 			});
+
+			channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+				if htlcs.is_empty() {
+					// This should be unreachable
+					debug_assert!(false);
+					return false;
+				}
+				if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+					// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+					// In this case we're not going to handle any timeouts of the parts here.
+					if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+						return true;
+					} else if htlcs.into_iter().any(|htlc| {
+						htlc.timer_ticks += 1;
+						return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+					}) {
+						timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+						return false;
+					}
+				}
+				true
+			});
+		}
+
+		for htlc_source in timed_out_mpp_htlcs.drain(..) {
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
 		}
 
 		for (err, counterparty_node_id) in handle_errors.drain(..) {
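The retain pass above decides whether the HTLCs accumulated under a payment hash form a complete multi-path payment by comparing their summed values against the sender's advertised `total_msat`; only incomplete sets are aged and eventually failed back. The core check, restated as a tiny standalone function for illustration (not LDK API):

    // Illustrative restatement of the completeness test used in the retain closure above.
    fn mpp_complete(total_msat: u64, received_part_values_msat: &[u64]) -> bool {
        received_part_values_msat.iter().sum::<u64>() == total_msat
    }

    fn main() {
        // All three parts of a 300_000 msat payment arrived: keep them and let the payment be claimed.
        assert!(mpp_complete(300_000, &[100_000, 150_000, 50_000]));
        // Only one part arrived: this set keeps aging via timer_ticks and may be failed back.
        assert!(!mpp_complete(300_000, &[100_000]));
    }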
@@ -4191,13 +4319,24 @@ impl ChannelMana
 			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
 		}
 
-		let mut channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(),
-				&their_features, msg, 0, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger)
-			.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
+		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+		let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager,
			counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration,
			self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
+		{
+			Err(e) => {
+				self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+				return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
+			},
+			Ok(res) => res
+		};
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
 		match channel_state.by_id.entry(channel.channel_id()) {
-			hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())),
+			hash_map::Entry::Occupied(_) => {
+				self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+				return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone()))
+			},
 			hash_map::Entry::Vacant(entry) => {
 				if !self.default_configuration.manually_accept_inbound_channels {
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
@@ -4399,9 +4538,9 @@ impl ChannelMana
 					if let Some(monitor_update) = monitor_update {
 						if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
 							let (result, is_permanent) =
-								handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key());
+								handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
 							if is_permanent {
-								remove_channel!(channel_state, chan_entry);
+								remove_channel!(self, channel_state, chan_entry);
 								break result;
 							}
 						}
@@ -4449,10 +4588,7 @@ impl ChannelMana
 					// also implies there are no pending HTLCs left on the channel, so we can
 					// fully delete it from tracking (the channel monitor is still around to
 					// watch for old state broadcasts)!
-					if let Some(short_id) = chan_entry.get().get_short_channel_id() {
-						channel_state.short_to_id.remove(&short_id);
-					}
-					(tx, Some(chan_entry.remove_entry().1))
+					(tx, Some(remove_channel!(self, channel_state, chan_entry)))
 				} else { (tx, None) }
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -4890,12 +5026,9 @@ impl ChannelMana
 				let mut channel_lock = self.channel_state.lock().unwrap();
 				let channel_state = &mut *channel_lock;
 				let by_id = &mut channel_state.by_id;
-				let short_to_id = &mut channel_state.short_to_id;
 				let pending_msg_events = &mut channel_state.pending_msg_events;
-				if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
-					if let Some(short_id) = chan.get_short_channel_id() {
-						short_to_id.remove(&short_id);
-					}
+				if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
+					let mut chan = remove_channel!(self, channel_state, chan_entry);
 					failed_channels.push(chan.force_shutdown(false));
 					if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -4965,7 +5098,7 @@ impl ChannelMana
 				if let Some((commitment_update, monitor_update)) = commitment_opt {
 					if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
 						has_monitor_update = true;
-						let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), channel_id);
+						let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
 						handle_errors.push((chan.get_counterparty_node_id(), res));
 						if close_channel { return false; }
 					} else {
@@ -5024,10 +5157,6 @@ impl ChannelMana
 					if let Some(tx) = tx_opt {
 						// We're done with this channel. We got a closing_signed and sent back
 						// a closing_signed with a closing transaction to broadcast.
-						if let Some(short_id) = chan.get_short_channel_id() {
-							short_to_id.remove(&short_id);
-						}
-
 						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 								msg: update
@@ -5038,6 +5167,7 @@ impl ChannelMana
 						log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 						self.tx_broadcaster.broadcast_transaction(&tx);
+						update_maps_on_chan_removal!(self, short_to_id, chan);
 						false
 					} else { true }
 				},
@@ -5145,6 +5275,8 @@ impl ChannelMana
 	/// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
 	/// serialized state with LDK node(s) running 0.0.103 and earlier.
 	///
+	/// May panic if `invoice_expiry_delta_secs` is greater than one year.
+	///
 	/// # Note
 	/// This method is deprecated and will be removed soon.
 	///
@@ -5185,8 +5317,6 @@ impl ChannelMana
 	/// If you need exact expiry semantics, you should enforce them upon receipt of
 	/// [`PaymentReceived`].
 	///
-	/// May panic if `invoice_expiry_delta_secs` is greater than one year.
-	///
 	/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
 	/// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
 	///
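The doc lines moved above restate the hard limit on the deprecated inbound-payment helpers: they panic when `invoice_expiry_delta_secs` exceeds one year. A caller-side sanity check might look like the following sketch; the one-year figure is the documented bound, while the clamping policy is only an example:

    // Sketch of a caller-side guard for the documented one-year limit.
    const ONE_YEAR_SECS: u32 = 60 * 60 * 24 * 365;

    fn clamp_invoice_expiry(requested_secs: u32) -> u32 {
        requested_secs.min(ONE_YEAR_SECS)
    }

    fn main() {
        assert_eq!(clamp_invoice_expiry(3_600), 3_600);            // one hour passes through
        assert_eq!(clamp_invoice_expiry(u32::MAX), ONE_YEAR_SECS); // over-long requests get clamped
    }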
@@ -5209,6 +5339,8 @@ impl ChannelMana
 	/// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share
 	/// serialized state with LDK node(s) running 0.0.103 and earlier.
 	///
+	/// May panic if `invoice_expiry_delta_secs` is greater than one year.
+	///
 	/// # Note
 	/// This method is deprecated and will be removed soon.
 	///
@@ -5234,7 +5366,7 @@ impl ChannelMana
 		let mut channel_state = self.channel_state.lock().unwrap();
 		let best_block = self.best_block.read().unwrap();
 		loop {
-			let scid_candidate = fake_scid::get_phantom_scid(&self.fake_scid_rand_bytes, best_block.height(), &self.genesis_hash, &self.keys_manager);
+			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
 			// Ensure the generated scid doesn't conflict with a real channel.
 			match channel_state.short_to_id.entry(scid_candidate) {
 				hash_map::Entry::Occupied(_) => continue,
@@ -5409,6 +5541,12 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
 			.map(|(a, b)| (a, Vec::new(), b)));
+
+		let last_best_block_height = self.best_block.read().unwrap().height();
+		if height < last_best_block_height {
+			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
+			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+		}
 	}
 
 	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
@@ -5522,10 +5660,7 @@ where
 						}));
 					}
 					if let Some(funding_locked) = funding_locked_opt {
-						pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-							node_id: channel.get_counterparty_node_id(),
-							msg: funding_locked,
-						});
+						send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
 						if channel.is_usable() {
 							log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
 							pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
@@ -5535,7 +5670,6 @@ where
 						} else {
 							log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
 						}
-						short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
 					}
 					if let Some(announcement_sigs) = announcement_sigs {
 						log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
@@ -5555,9 +5689,7 @@ where
 						}
 					}
 				} else if let Err(reason) = res {
-					if let Some(short_id) = channel.get_short_channel_id() {
-						short_to_id.remove(&short_id);
-					}
+					update_maps_on_chan_removal!(self, short_to_id, channel);
 					// It looks like our counterparty went on-chain or funding transaction was
 					// reorged out of the main chain. Close the channel.
 					failed_channels.push(channel.force_shutdown(true));
@@ -5747,15 +5879,13 @@ impl
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
-			let short_to_id = &mut channel_state.short_to_id;
 			let pending_msg_events = &mut channel_state.pending_msg_events;
+			let short_to_id = &mut channel_state.short_to_id;
 			if no_connection_possible {
 				log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
 				channel_state.by_id.retain(|_, chan| {
 					if chan.get_counterparty_node_id() == *counterparty_node_id {
-						if let Some(short_id) = chan.get_short_channel_id() {
-							short_to_id.remove(&short_id);
-						}
+						update_maps_on_chan_removal!(self, short_to_id, chan);
 						failed_channels.push(chan.force_shutdown(true));
 						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -5774,9 +5904,7 @@ impl
 					if chan.get_counterparty_node_id() == *counterparty_node_id {
 						chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
 						if chan.is_shutdown() {
-							if let Some(short_id) = chan.get_short_channel_id() {
-								short_to_id.remove(&short_id);
-							}
+							update_maps_on_chan_removal!(self, short_to_id, chan);
 							self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
 							return false;
 						} else {
@@ -5807,6 +5935,7 @@ impl
 					&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
 					&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
 					&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
+					&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
 				}
 			});
 		}
@@ -5966,6 +6095,7 @@ impl_writeable_tlv_based!(ChannelCounterparty, {
 });
 
 impl_writeable_tlv_based!(ChannelDetails, {
+	(1, inbound_scid_alias, option),
 	(2, channel_id, required),
 	(4, counterparty, required),
 	(6, funding_txo, option),
@@ -6146,6 +6276,7 @@ impl Readable for ClaimableHTLC {
 		};
 		Ok(Self {
 			prev_hop: prev_hop.0.unwrap(),
+			timer_ticks: 0,
 			value,
 			onion_payload,
 			cltv_expiry,
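Two serialization details above are worth spelling out: `inbound_scid_alias` is written with the odd TLV type 1, which in LDK's TLV convention marks an optional field that older readers may skip, while required fields keep even types; and `timer_ticks` is not serialized at all, so HTLCs reloaded from disk restart their MPP timeout at zero. A std-only sketch of the even/odd rule that the `(1, inbound_scid_alias, option)` entry relies on:

    // Sketch of the "it's OK to be odd" TLV rule: a reader that doesn't understand a
    // field may skip it only if its type number is odd; unknown even types are an error.
    fn reader_may_skip_unknown(tlv_type: u64) -> bool {
        tlv_type % 2 == 1
    }

    fn main() {
        assert!(reader_may_skip_unknown(1));   // inbound_scid_alias: old readers ignore it
        assert!(!reader_may_skip_unknown(2));  // channel_id: required, unknown means a hard error
    }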
@@ -6775,6 +6906,32 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			}
 		}
 
+		let mut outbound_scid_aliases = HashSet::new();
+		for (chan_id, chan) in by_id.iter_mut() {
+			if chan.outbound_scid_alias() == 0 {
+				let mut outbound_scid_alias;
+				loop {
+					outbound_scid_alias = fake_scid::Namespace::OutboundAlias
						.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
+					if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
+				}
+				chan.set_outbound_scid_alias(outbound_scid_alias);
+			} else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+				// Note that in rare cases its possible to hit this while reading an older
+				// channel if we just happened to pick a colliding outbound alias above.
+				log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+				return Err(DecodeError::InvalidValue);
+			}
+			if chan.is_usable() {
+				if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() {
+					// Note that in rare cases its possible to hit this while reading an older
+					// channel if we just happened to pick a colliding outbound alias above.
+					log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+					return Err(DecodeError::InvalidValue);
+				}
+			}
+		}
+
 	let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 	let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 	let channel_manager = ChannelManager {
@@ -6795,6 +6952,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 		inbound_payment_key: expanded_inbound_key,
 		pending_inbound_payments: Mutex::new(pending_inbound_payments),
 		pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+
+		outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
 		fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
 		our_network_key,
@@ -6844,6 +7003,7 @@ mod tests {
 	use util::errors::APIError;
 	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 	use util::test_utils;
+	use chain::keysinterface::KeysInterface;
 
 	#[cfg(feature = "std")]
 	#[test]
@@ -7097,6 +7257,7 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 		let scorer = test_utils::TestScorer::with_penalty(0);
+		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 
 		// To start (1), send a regular payment but don't claim it.
 		let expected_route = [&nodes[1]];
@@ -7110,7 +7271,7 @@ mod tests {
 		};
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
-			nodes[0].logger, &scorer
+			nodes[0].logger, &scorer, &random_seed_bytes
 		).unwrap();
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 		check_added_monitors!(nodes[0], 1);
@@ -7141,7 +7302,7 @@ mod tests {
 		let payment_preimage = PaymentPreimage([42; 32]);
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
-			nodes[0].logger, &scorer
+			nodes[0].logger, &scorer, &random_seed_bytes
 		).unwrap();
 		let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 		check_added_monitors!(nodes[0], 1);
@@ -7202,9 +7363,10 @@ mod tests {
 		let network_graph = nodes[0].network_graph;
 		let first_hops = nodes[0].node.list_usable_channels();
 		let scorer = test_utils::TestScorer::with_penalty(0);
+		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer
+			nodes[0].logger, &scorer, &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -7245,9 +7407,10 @@ mod tests {
 		let network_graph = nodes[0].network_graph;
 		let first_hops = nodes[0].node.list_usable_channels();
 		let scorer = test_utils::TestScorer::with_penalty(0);
+		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer
+			nodes[0].logger, &scorer, &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -7331,7 +7494,7 @@ pub mod bench {
 	use chain::Listen;
 	use chain::chainmonitor::{ChainMonitor, Persist};
-	use chain::keysinterface::{KeysManager, InMemorySigner};
+	use chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
 	use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
 	use ln::features::{InitFeatures, InvoiceFeatures};
 	use ln::functional_test_utils::*;
@@ -7340,7 +7503,7 @@ pub mod bench {
 	use routing::router::{PaymentParameters, get_route};
 	use util::test_utils;
 	use util::config::UserConfig;
-	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256::Hash as Sha256;
@@ -7448,8 +7611,11 @@ pub mod bench {
 				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id())
 					.with_features(InvoiceFeatures::known());
 				let scorer = test_utils::TestScorer::with_penalty(0);
-				let route = get_route(&$node_a.get_our_node_id(), &payment_params, &dummy_graph,
-					Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer).unwrap();
+				let seed = [3u8; 32];
+				let keys_manager = KeysManager::new(&seed, 42, 42);
+				let random_seed_bytes = keys_manager.get_secure_random_bytes();
+				let route = get_route(&$node_a.get_our_node_id(), &payment_params, &dummy_graph.read_only(),
					Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer, &random_seed_bytes).unwrap();
 
 				let mut payment_preimage = PaymentPreimage([0; 32]);
 				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());