X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=692d46eda90e8937ec289eed73f47fff6c8d5c8c;hb=b4521f52e29015b344f2acfe1ed8ed0cf95ff076;hp=c787cedfad3a7da3a47e719c842689ee94f62545;hpb=0017bc88a84fa7f253bede430fbb147f6e60c50b;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c787cedf..692d46ed 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -24,7 +24,7 @@
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
-use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{BlockHash, Txid};
@@ -36,7 +36,7 @@ use bitcoin::secp256k1;
 use chain;
 use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
@@ -48,11 +48,11 @@ use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParame
 use ln::msgs;
 use ln::msgs::NetAddress;
 use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 use ln::wire::Encode;
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
-use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::config::{UserConfig, ChannelConfig};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::{byte_utils, events};
 use util::scid_utils::fake_scid;
 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
@@ -281,7 +281,7 @@ enum ClaimFundsFromHop {
 	DuplicateClaim,
 }

-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);

 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -369,15 +369,6 @@ impl MsgHandleErrInternal {
 					},
 				},
 			},
-			ChannelError::CloseDelayBroadcast(msg) => LightningError {
-				err: msg.clone(),
-				action: msgs::ErrorAction::SendErrorMessage {
-					msg: msgs::ErrorMessage {
-						channel_id,
-						data: msg
-					},
-				},
-			},
 		},
 			chan_id: None,
 			shutdown_finish: None,
@@ -406,10 +397,12 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
 	pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-	/// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added
-	/// here once the channel is available for normal use, with SCIDs being added once the funding
-	/// transaction is confirmed at the channel's required confirmation depth.
-	pub(super) short_to_id: HashMap<u64, [u8; 32]>,
+	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
+	///
+	/// Outbound SCID aliases are added here once the channel is available for normal use, with
+	/// SCIDs being added once the funding transaction is confirmed at the channel's required
+	/// confirmation depth.
+	pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
 	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
 	///
 	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
@@ -695,7 +688,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 	default_configuration: UserConfig,
 	genesis_hash: BlockHash,
-	fee_estimator: F,
+	fee_estimator: LowerBoundedFeeEstimator<F>,
 	chain_monitor: M,
 	tx_broadcaster: T,
@@ -737,6 +730,25 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 	pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,

+	/// `channel_id` -> `counterparty_node_id`.
+	///
+	/// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
+	/// multiple channels with the same `temporary_channel_id` to different peers can exist,
+	/// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+	///
+	/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
+	/// the corresponding channel for the event, as we only have access to the `channel_id` during
+	/// the handling of the events.
+	///
+	/// TODO:
+	/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
+	/// to make the `counterparty_node_id` a required field in `ChannelMonitor`s, which unfortunately
+	/// would break backwards compatibility.
+	/// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the
+	/// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is
+	/// required to access the channel with the `counterparty_node_id`.
+	id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+
 	our_network_key: SecretKey,
 	our_network_pubkey: PublicKey,
@@ -749,6 +761,11 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 	pub inbound_htlc_minimum_msat: Option<u64>,
 	/// The largest value HTLC (in msat) we currently will accept, for this channel.
 	pub inbound_htlc_maximum_msat: Option<u64>,
+	/// Set of configurable parameters that affect channel operation.
+	///
+	/// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
+	pub config: Option<ChannelConfig>,
 }

 impl ChannelDetails {
@@ -1229,9 +1256,9 @@ macro_rules! handle_error {
 }

 macro_rules! update_maps_on_chan_removal {
-	($self: expr, $short_to_id: expr, $channel: expr) => {
+	($self: expr, $short_to_chan_info: expr, $channel: expr) => {
 		if let Some(short_id) = $channel.get_short_channel_id() {
-			$short_to_id.remove(&short_id);
+			$short_to_chan_info.remove(&short_id);
 		} else {
 			// If the channel was never confirmed on-chain prior to its closure, remove the
 			// outbound SCID alias we used for it from the collision-prevention set. While we
@@ -1242,13 +1269,14 @@ macro_rules! update_maps_on_chan_removal {
 			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
 			debug_assert!(alias_removed);
 		}
-		$short_to_id.remove(&$channel.outbound_scid_alias());
+		$self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+		$short_to_chan_info.remove(&$channel.outbound_scid_alias());
	}
}

/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
macro_rules!
convert_chan_err { - ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { + ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => { match $err { ChannelError::Warn(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone())) @@ -1258,18 +1286,11 @@ macro_rules! convert_chan_err { }, ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $short_to_id, $channel); + update_maps_on_chan_removal!($self, $short_to_chan_info, $channel); let shutdown_res = $channel.force_shutdown(true); (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, - ChannelError::CloseDelayBroadcast(msg) => { - log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $short_to_id, $channel); - let shutdown_res = $channel.force_shutdown(false); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), - shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) - } } } } @@ -1279,7 +1300,7 @@ macro_rules! break_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key()); + let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key()); if drop { $entry.remove_entry(); } @@ -1294,7 +1315,7 @@ macro_rules! try_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key()); + let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key()); if drop { $entry.remove_entry(); } @@ -1308,18 +1329,18 @@ macro_rules! remove_channel { ($self: expr, $channel_state: expr, $entry: expr) => { { let channel = $entry.remove_entry().1; - update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel); + update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel); channel } } } macro_rules! handle_monitor_err { - ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { + ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { match $err { ChannelMonitorUpdateErr::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..])); - update_maps_on_chan_removal!($self, $short_to_id, $chan); + update_maps_on_chan_removal!($self, $short_to_chan_info, $chan); // TODO: $failed_fails is dropped here, which will cause other channels to hit the // chain in a confused state! We need to move them into the ChannelMonitor which // will be responsible for failing backwards once things confirm on-chain. 
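// Illustration (not part of the patch): a minimal, self-contained sketch of the
// bookkeeping that `update_maps_on_chan_removal!` and the macros above maintain
// after this change. The type aliases are hypothetical stand-ins for LDK's
// secp256k1 `PublicKey` and channel-id types.
use std::collections::HashMap;

type NodePubKey = [u8; 33]; // stand-in for secp256k1 PublicKey
type ChannelId = [u8; 32];

struct LookupMaps {
	// SCID (or outbound SCID alias) -> (counterparty_node_id, channel_id),
	// the new shape of `short_to_chan_info`.
	short_to_chan_info: HashMap<u64, (NodePubKey, ChannelId)>,
	// Funded `channel_id` -> `counterparty_node_id`; temporary channel ids are
	// excluded since they can collide across peers.
	id_to_peer: HashMap<ChannelId, NodePubKey>,
}

impl LookupMaps {
	// Mirrors update_maps_on_chan_removal!: on channel removal, drop the real
	// SCID (if the funding ever confirmed), the outbound SCID alias, and the
	// id_to_peer entry.
	fn on_chan_removal(&mut self, real_scid: Option<u64>, outbound_alias: u64, channel_id: ChannelId) {
		if let Some(scid) = real_scid {
			self.short_to_chan_info.remove(&scid);
		}
		self.short_to_chan_info.remove(&outbound_alias);
		self.id_to_peer.remove(&channel_id);
	}
}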
@@ -1359,7 +1380,7 @@ macro_rules! handle_monitor_err { } }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { - let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); + let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); if drop { $entry.remove_entry(); } @@ -1405,19 +1426,19 @@ macro_rules! maybe_break_monitor_err { } macro_rules! send_channel_ready { - ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => { + ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => { $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { node_id: $channel.get_counterparty_node_id(), msg: $channel_ready_msg, }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. - let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id()); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(), + let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id()); - assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(), + let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } } @@ -1457,7 +1478,7 @@ macro_rules! handle_chan_restoration_locked { // Similar to the above, this implies that we're letting the channel_ready fly // before it should be allowed to. 
assert!(chanmon_update.is_none()); - send_channel_ready!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg); + send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg); } if let Some(msg) = $announcement_sigs { $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { @@ -1571,7 +1592,7 @@ impl ChannelMana ChannelManager { default_configuration: config.clone(), genesis_hash: genesis_block(params.network).header.block_hash(), - fee_estimator: fee_est, + fee_estimator: LowerBoundedFeeEstimator::new(fee_est), chain_monitor, tx_broadcaster, @@ -1579,7 +1600,7 @@ impl ChannelMana channel_state: Mutex::new(ChannelHolder{ by_id: HashMap::new(), - short_to_id: HashMap::new(), + short_to_chan_info: HashMap::new(), forward_htlcs: HashMap::new(), claimable_htlcs: HashMap::new(), pending_msg_events: Vec::new(), @@ -1587,6 +1608,7 @@ impl ChannelMana outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: Mutex::new(HashMap::new()), + id_to_peer: Mutex::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()), @@ -1595,6 +1617,8 @@ impl ChannelMana inbound_payment_key: expanded_inbound_key, fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(), + probing_cookie_secret: keys_manager.get_secure_random_bytes(), + last_node_announcement_serial: AtomicUsize::new(0), highest_seen_timestamp: AtomicUsize::new(0), @@ -1611,7 +1635,7 @@ impl ChannelMana } } - /// Gets the current configuration applied to all new channels, as + /// Gets the current configuration applied to all new channels. pub fn get_current_default_configuration(&self) -> &UserConfig { &self.default_configuration } @@ -1759,7 +1783,8 @@ impl ChannelMana is_usable: channel.is_live(), is_public: channel.should_announce(), inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat() + inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), + config: Some(channel.config()), }); } } @@ -1836,7 +1861,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { remove_channel!(self, channel_state, chan_entry); break result; @@ -1865,7 +1890,8 @@ impl ChannelMana }; for htlc_source in failed_htlcs.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -1921,7 +1947,9 @@ impl ChannelMana let (monitor_update_option, mut failed_htlcs) = shutdown_res; log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } if let Some((funding_txo, monitor_update)) = monitor_update_option { // There isn't anything we can do if we get an update failure - we're already @@ -1934,7 +1962,8 @@ impl ChannelMana /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. - fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result { + fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool) + -> Result { let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -1953,7 +1982,7 @@ impl ChannelMana } }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); - self.finish_force_close_channel(chan.force_shutdown(true)); + self.finish_force_close_channel(chan.force_shutdown(broadcast)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { @@ -1964,13 +1993,9 @@ impl ChannelMana Ok(chan.get_counterparty_node_id()) } - /// Force closes a channel, immediately broadcasting the latest local commitment transaction to - /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to - /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding - /// channel. 
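// Illustration (not part of the patch): the widened `ShutdownResult` now carries,
// per failed HTLC, the counterparty node id and channel id of the next hop, so
// failures can be attributed via `HTLCDestination::NextHopChannel` as seen in
// `finish_force_close_channel` above. A minimal sketch with stand-in types:
type NodePubKey = [u8; 33]; // stand-in for secp256k1 PublicKey

enum Destination {
	// Mirrors HTLCDestination::NextHopChannel from this patch.
	NextHopChannel { node_id: Option<NodePubKey>, channel_id: [u8; 32] },
}

// For each (source, payment_hash, counterparty_node_id, channel_id) entry
// drained from a ShutdownResult, the receiver reported on the failure path is:
fn receiver_for(counterparty_node_id: NodePubKey, channel_id: [u8; 32]) -> Destination {
	Destination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }
}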
- pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> { + fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) { + match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) { Ok(counterparty_node_id) => { self.channel_state.lock().unwrap().pending_msg_events.push( events::MessageSendEvent::HandleError { @@ -1986,11 +2011,39 @@ impl ChannelMana } } + /// Force closes a channel, immediately broadcasting the latest local transaction(s) and + /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to + /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding + /// channel. + pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) + -> Result<(), APIError> { + self.force_close_sending_error(channel_id, counterparty_node_id, true) + } + + /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting + /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the + /// `counterparty_node_id` isn't the counterparty of the corresponding channel. + /// + /// You can always get the latest local transaction(s) to broadcast from + /// [`ChannelMonitor::get_latest_holder_commitment_txn`]. + pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) + -> Result<(), APIError> { + self.force_close_sending_error(channel_id, counterparty_node_id, false) + } + /// Force close all channels, immediately broadcasting the latest local commitment transaction /// for each to the chain and rejecting new HTLCs on each. - pub fn force_close_all_channels(&self) { + pub fn force_close_all_channels_broadcasting_latest_txn(&self) { for chan in self.list_channels() { - let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id); + let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id); + } + } + + /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest + /// local transaction(s). + pub fn force_close_all_channels_without_broadcasting_txn(&self) { + for chan in self.list_channels() { + let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id); } } @@ -2091,17 +2144,17 @@ impl ChannelMana }) } - fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard>) { + fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus { macro_rules! 
return_malformed_err { ($msg: expr, $err_code: expr) => { { log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(), failure_code: $err_code, - })), self.channel_state.lock().unwrap()); + })); } } } @@ -2121,25 +2174,20 @@ impl ChannelMana //node knows the HMAC matched, so they already know what is there... return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4); } - - let mut channel_state = None; macro_rules! return_err { ($msg: expr, $err_code: expr, $data: expr) => { { log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - if channel_state.is_none() { - channel_state = Some(self.channel_state.lock().unwrap()); - } - return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data), - })), channel_state.unwrap()); + })); } } } - let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) { + let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) { Ok(res) => res, Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { return_malformed_err!(err_msg, err_code); @@ -2164,22 +2212,10 @@ impl ChannelMana } }, onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => { - let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); - - let blinding_factor = { - let mut sha = Sha256::engine(); - sha.input(&new_pubkey.serialize()[..]); - sha.input(&shared_secret); - Sha256::from_engine(sha).into_inner() - }; - - let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) { - Err(e) - } else { Ok(new_pubkey) }; - + let new_pubkey = msg.onion_routing_packet.public_key.unwrap(); let outgoing_packet = msgs::OnionPacket { version: 0, - public_key, + public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret), hop_data: new_packet_bytes, hmac: next_hop_hmac.clone(), }; @@ -2205,14 +2241,14 @@ impl ChannelMana } }; - channel_state = Some(self.channel_state.lock().unwrap()); if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel // with a short_channel_id of 0. This is important as various things later assume // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. 
} = routing { - let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); if let Some((err, code, chan_update)) = loop { + let mut channel_state = self.channel_state.lock().unwrap(); + let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned(); let forwarding_id_opt = match id_option { None => { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a @@ -2223,10 +2259,10 @@ impl ChannelMana break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); } }, - Some(id) => Some(id.clone()), + Some((_cp_id, chan_id)) => Some(chan_id.clone()), }; - let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt { - let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); + let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt { + let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap(); if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if @@ -2252,18 +2288,20 @@ impl ChannelMana if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); } - let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64) - .and_then(|prop_fee| { (prop_fee / 1000000) - .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) }); - if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient - break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, chan_update_opt)); + if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *amt_to_forward, *outgoing_cltv_value) { + break Some((err, code, chan_update_opt)); } - (chan_update_opt, chan.get_cltv_expiry_delta()) - } else { (None, MIN_CLTV_EXPIRY_DELTA) }; + chan_update_opt + } else { + if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry + break Some(( + "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", + 0x1000 | 13, None, + )); + } + None + }; - if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + forwardee_cltv_expiry_delta as u64 { // incorrect_cltv_expiry - break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, chan_update_opt)); - } let cur_height = self.best_block.read().unwrap().height() + 1; // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, // but we want to be robust wrt to counterparty packet sanitization (see @@ -2310,7 +2348,7 @@ impl ChannelMana } } - (pending_forward_info, channel_state.unwrap()) + pending_forward_info } /// Gets the current channel_update for the given channel. 
This first checks if the channel is @@ -2357,7 +2395,7 @@ impl ChannelMana flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1), cltv_expiry_delta: chan.get_cltv_expiry_delta(), htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), - htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()), + htlc_maximum_msat: chan.get_announced_htlc_max_msat(), fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), fee_proportional_millionths: chan.get_fee_proportional_millionths(), excess_data: Vec::new(), @@ -2402,9 +2440,9 @@ impl ChannelMana } } - let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { + let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}), - Some(id) => id.clone(), + Some((_cp_id, chan_id)) => chan_id.clone(), }; macro_rules! insert_outbound_payment { @@ -2529,12 +2567,6 @@ impl ChannelMana if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } - if route.paths.len() > 10 { - // This limit is completely arbitrary - there aren't any real fundamental path-count - // limits. After we support retrying individual paths we should likely bump this, but - // for now more than 10 paths likely carries too much one-path failure. - return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"})); - } if payment_secret.is_none() && route.paths.len() > 1 { return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()})); } @@ -2727,6 +2759,43 @@ impl ChannelMana } } + /// Send a payment that is probing the given route for liquidity. We calculate the + /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows + /// us to easily discern them from real payments. + pub fn send_probe(&self, hops: Vec) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { + let payment_id = PaymentId(self.keys_manager.get_secure_random_bytes()); + + let payment_hash = self.probing_cookie_from_id(&payment_id); + + if hops.len() < 2 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "No need probing a path with less than two hops".to_string() + })) + } + + let route = Route { paths: vec![hops], payment_params: None }; + + match self.send_payment_internal(&route, payment_hash, &None, None, Some(payment_id), None) { + Ok(payment_id) => Ok((payment_hash, payment_id)), + Err(e) => Err(e) + } + } + + /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a + /// payment probe. + pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool { + let target_payment_hash = self.probing_cookie_from_id(payment_id); + target_payment_hash == *payment_hash + } + + /// Returns the 'probing cookie' for the given [`PaymentId`]. 
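// Illustration (not part of the patch): the probe-detection scheme used by
// `send_probe`/`payment_is_probe` above. A probe's PaymentHash is derived
// deterministically from a node-static secret and the random PaymentId, so
// probes can be recognized without storing any per-probe state. A standalone
// sketch of the same hashing as `probing_cookie_from_id` just below:
use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

// payment_hash = SHA256(probing_cookie_secret || payment_id)
fn probing_cookie(probing_cookie_secret: &[u8; 32], payment_id: &[u8; 32]) -> [u8; 32] {
	let mut preimage = [0u8; 64];
	preimage[..32].copy_from_slice(probing_cookie_secret);
	preimage[32..].copy_from_slice(payment_id);
	Sha256::hash(&preimage).into_inner()
}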
+ fn probing_cookie_from_id(&self, payment_id: &PaymentId) -> PaymentHash { + let mut preimage = [0u8; 64]; + preimage[..32].copy_from_slice(&self.probing_cookie_secret); + preimage[32..].copy_from_slice(&payment_id.0); + PaymentHash(Sha256::hash(&preimage).into_inner()) + } + /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. fn funding_transaction_generated_intern, &Transaction) -> Result>( @@ -2765,6 +2834,10 @@ impl ChannelMana panic!("Generated duplicate funding txid?"); }, hash_map::Entry::Vacant(e) => { + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + } e.insert(chan); } } @@ -2783,6 +2856,9 @@ impl ChannelMana /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`]. /// + /// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation + /// across the p2p network. + /// /// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided /// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`]. /// @@ -2798,6 +2874,11 @@ impl ChannelMana /// not currently support replacing a funding transaction on an existing channel. Instead, /// create a new channel with a conflicting funding transaction. /// + /// Note to keep the miner incentives aligned in moving the blockchain forward, we recommend + /// the wallet software generating the funding transaction to apply anti-fee sniping as + /// implemented by Bitcoin Core wallet. See + /// for more details. + /// /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> { @@ -2810,6 +2891,18 @@ impl ChannelMana }); } } + { + let height = self.best_block.read().unwrap().height(); + // Transactions are evaluated as final by network mempools at the next block. However, the modules + // constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if + // the wallet module is in advance on the LDK view, allow one more block of headroom. + // TODO: updated if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 landed and rust-bitcoin bumped. + if !funding_transaction.input.iter().all(|input| input.sequence == 0xffffffff) && funding_transaction.lock_time < 500_000_000 && funding_transaction.lock_time > height + 2 { + return Err(APIError::APIMisuseError { + err: "Funding transaction absolute timelock is non-final".to_owned() + }); + } + } self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| { let mut output_index = None; let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh(); @@ -2839,15 +2932,15 @@ impl ChannelMana #[allow(dead_code)] // Messages of up to 64KB should never end up more than half full with addresses, as that would - // be absurd. 
We ensure this by checking that at least 500 (our stated public contract on when + // be absurd. We ensure this by checking that at least 100 (our stated public contract on when // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB // message... const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2; #[deny(const_err)] #[allow(dead_code)] // ...by failing to compile if the number of addresses that would be half of a message is - // smaller than 500: - const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500; + // smaller than 100: + const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 100; /// Regenerates channel_announcements and generates a signed node_announcement from the given /// arguments, providing them in corresponding events via @@ -2864,13 +2957,13 @@ impl ChannelMana /// tying these addresses together and to this node. If you wish to preserve user privacy, /// addresses should likely contain only Tor Onion addresses. /// - /// Panics if `addresses` is absurdly large (more than 500). + /// Panics if `addresses` is absurdly large (more than 100). /// /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - if addresses.len() > 500 { + if addresses.len() > 100 { panic!("More than half the message size was taken up by public addresses!"); } @@ -2920,6 +3013,73 @@ impl ChannelMana } } + /// Atomically updates the [`ChannelConfig`] for the given channels. + /// + /// Once the updates are applied, each eligible channel (advertised with a known short channel + /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`], + /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated + /// containing the new [`ChannelUpdate`] message which should be broadcast to the network. + /// + /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect + /// `counterparty_node_id` is provided. + /// + /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value + /// below [`MIN_CLTV_EXPIRY_DELTA`]. + /// + /// If an error is returned, none of the updates should be considered applied. 
+ /// + /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths + /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat + /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta + /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate + /// [`ChannelUpdate`]: msgs::ChannelUpdate + /// [`ChannelUnavailable`]: APIError::ChannelUnavailable + /// [`APIMisuseError`]: APIError::APIMisuseError + pub fn update_channel_config( + &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig, + ) -> Result<(), APIError> { + if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA { + return Err(APIError::APIMisuseError { + err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA), + }); + } + + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop( + &self.total_consistency_lock, &self.persistence_notifier, + ); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + for channel_id in channel_ids { + let channel_counterparty_node_id = channel_state.by_id.get(channel_id) + .ok_or(APIError::ChannelUnavailable { + err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)), + })? + .get_counterparty_node_id(); + if channel_counterparty_node_id != *counterparty_node_id { + return Err(APIError::APIMisuseError { + err: "counterparty node id mismatch".to_owned(), + }); + } + } + for channel_id in channel_ids { + let channel = channel_state.by_id.get_mut(channel_id).unwrap(); + if !channel.update_config(config) { + continue; + } + if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get_counterparty_node_id(), + msg, + }); + } + } + } + Ok(()) + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -2937,29 +3097,50 @@ impl ChannelMana for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { - let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) { - Some(chan_id) => chan_id.clone(), + let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) { + Some((_cp_id, chan_id)) => chan_id.clone(), None => { for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, prev_funding_outpoint } => { + macro_rules! 
failure_handler { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { + log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); + + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + phantom_shared_secret: $phantom_ss, + }); + + let reason = if $next_hop_unknown { + HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id } + } else { + HTLCDestination::FailedPayment{ payment_hash } + }; + + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }, + reason + )); + continue; + } + } macro_rules! fail_forward { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { { - log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - phantom_shared_secret: $phantom_ss, - }); - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code: $err_code, data: $err_data } - )); - continue; + failure_handler!($msg, $err_code, $err_data, $phantom_ss, true); + } + } + } + macro_rules! failed_payment { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + { + failure_handler!($msg, $err_code, $err_data, $phantom_ss, false); } } } @@ -2967,7 +3148,7 @@ impl ChannelMana let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) { let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); - let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { + let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { Ok(res) => res, Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner(); @@ -2975,17 +3156,17 @@ impl ChannelMana // `update_fail_malformed_htlc`, meaning here we encrypt the error as // if it came from us (the second-to-last hop) but contains the sha256 // of the onion. 
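// Illustration (not part of the patch): how the `$next_hop_unknown` flag in
// `failure_handler!` above selects the reported destination. Stand-in enum;
// the real type is `util::events::HTLCDestination`.
enum Destination {
	UnknownNextHop { requested_forward_scid: u64 },
	FailedPayment { payment_hash: [u8; 32] },
}

fn destination(next_hop_unknown: bool, short_chan_id: u64, payment_hash: [u8; 32]) -> Destination {
	if next_hop_unknown {
		// fail_forward!: the requested SCID did not resolve to a usable channel.
		Destination::UnknownNextHop { requested_forward_scid: short_chan_id }
	} else {
		// failed_payment!: we were the (possibly phantom) final destination.
		Destination::FailedPayment { payment_hash }
	}
}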
- fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None); + failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None); }, Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => { - fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); + failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); }, }; match next_hop { onion_utils::Hop::Receive(hop_data) => { match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) { Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])), - Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, _ => panic!(), @@ -3036,7 +3217,8 @@ impl ChannelMana } let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data } + HTLCFailReason::Reason { failure_code, data }, + HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } )); continue; }, @@ -3106,7 +3288,6 @@ impl ChannelMana // ChannelClosed event is generated by handle_error for us. Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, - ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; handle_errors.push((counterparty_node_id, err)); continue; @@ -3166,7 +3347,7 @@ impl ChannelMana }; macro_rules! 
fail_htlc { - ($htlc: expr) => { + ($htlc: expr, $payment_hash: expr) => { let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec(); htlc_msat_height_data.extend_from_slice( &byte_utils::be32_to_array(self.best_block.read().unwrap().height()), @@ -3178,7 +3359,8 @@ impl ChannelMana incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, }), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data } + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash: $payment_hash }, )); } } @@ -3197,7 +3379,7 @@ impl ChannelMana if htlcs.len() == 1 { if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); continue } } @@ -3219,7 +3401,7 @@ impl ChannelMana if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat { log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", log_bytes!(payment_hash.0), total_value, $payment_data.total_msat); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } else if total_value == $payment_data.total_msat { htlcs.push(claimable_htlc); new_events.push(events::Event::PaymentReceived { @@ -3253,7 +3435,7 @@ impl ChannelMana let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { Ok(payment_preimage) => payment_preimage, Err(()) => { - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); continue } }; @@ -3272,7 +3454,7 @@ impl ChannelMana }, hash_map::Entry::Occupied(_) => { log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } } } @@ -3281,17 +3463,17 @@ impl ChannelMana hash_map::Entry::Occupied(inbound_payment) => { if payment_data.is_none() { log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); continue }; let payment_data = payment_data.unwrap(); if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).", log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } else { let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage); if payment_received_generated { @@ -3310,8 +3492,8 @@ impl 
ChannelMana } } - for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason); + for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination); } self.forward_htlcs(&mut phantom_receives); @@ -3355,7 +3537,7 @@ impl ChannelMana self.process_background_events(); } - fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + fn update_channel_fee(&self, short_to_chan_info: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3375,7 +3557,7 @@ impl ChannelMana let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { Ok(res) => Ok(res), Err(e) => { - let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); if drop { retain_channel = false; } Err(res) } @@ -3383,7 +3565,7 @@ impl ChannelMana let ret_err = match res { Ok(Some((update_fee, commitment_signed, monitor_update))) => { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); + let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); if drop { retain_channel = false; } res } else { @@ -3416,16 +3598,16 @@ impl ChannelMana PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; - let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); let mut handle_errors = Vec::new(); { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; channel_state.by_id.retain(|chan_id, chan| { - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if err.is_err() { handle_errors.push(err); @@ -3445,6 +3627,8 @@ impl ChannelMana /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more /// than a minute, informing the network that they should no longer attempt to route over /// the channel. 
+ /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs + /// with the current `ChannelConfig`. /// /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate /// estimate fetches. @@ -3453,7 +3637,7 @@ impl ChannelMana let mut should_persist = NotifyOption::SkipPersist; if self.process_background_events() { should_persist = NotifyOption::DoPersist; } - let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); let mut handle_errors = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); @@ -3461,10 +3645,10 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; channel_state.by_id.retain(|chan_id, chan| { let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if err.is_err() { handle_errors.push((err, counterparty_node_id)); @@ -3472,7 +3656,7 @@ impl ChannelMana if !retain_channel { return false; } if let Err(e) = chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); handle_errors.push((Err(err), chan.get_counterparty_node_id())); if needs_close { return false; } } @@ -3503,6 +3687,8 @@ impl ChannelMana _ => {}, } + chan.maybe_expire_prev_config(); + true }); @@ -3530,7 +3716,8 @@ impl ChannelMana } for htlc_source in timed_out_mpp_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }); + let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver ); } for (err, counterparty_node_id) in handle_errors.drain(..) 
{ @@ -3566,7 +3753,8 @@ impl ChannelMana self.best_block.read().unwrap().height())); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }); + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash: *payment_hash }); } } } @@ -3601,7 +3789,8 @@ impl ChannelMana if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) { let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6)); if desired_err_code == 0x1000 | 20 { - // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 + // No flags for `disabled_flags` are currently defined so they're always two zero bytes. + // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008 0u16.write(&mut enc).expect("Writes cannot fail"); } (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail"); @@ -3620,7 +3809,10 @@ impl ChannelMana // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to // be surfaced to the user. - fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) { + fn fail_holding_cell_htlcs( + &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32], + counterparty_node_id: &PublicKey + ) { for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { match htlc_src { HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => { @@ -3632,8 +3824,9 @@ impl ChannelMana hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) }; let channel_state = self.channel_state.lock().unwrap(); - self.fail_htlc_backwards_internal(channel_state, - htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); + + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver) }, HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => { let mut session_priv_bytes = [0; 32]; @@ -3686,7 +3879,7 @@ impl ChannelMana /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) { //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. 
We should make this async and move it into the forward HTLCs @@ -3726,7 +3919,7 @@ impl ChannelMana return; } mem::drop(channel_state_lock); - let retry = if let Some(payment_params_data) = payment_params { + let mut retry = if let Some(payment_params_data) = payment_params { let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); Some(RouteParameters { payment_params: payment_params_data.clone(), @@ -3742,22 +3935,43 @@ impl ChannelMana let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); - // TODO: If we decided to blame ourselves (or one of our channels) in - // process_onion_failure we should close that channel as it implies our - // next-hop is needlessly blaming us! - events::Event::PaymentPathFailed { - payment_id: Some(payment_id), - payment_hash: payment_hash.clone(), - rejected_by_dest: !payment_retryable, - network_update, - all_paths_failed, - path: path.clone(), - short_channel_id, - retry, -#[cfg(test)] - error_code: onion_error_code, -#[cfg(test)] - error_data: onion_error_data + + if self.payment_is_probe(payment_hash, &payment_id) { + if !payment_retryable { + events::Event::ProbeSuccessful { + payment_id, + payment_hash: payment_hash.clone(), + path: path.clone(), + } + } else { + events::Event::ProbeFailed { + payment_id: payment_id, + payment_hash: payment_hash.clone(), + path: path.clone(), + short_channel_id, + } + } + } else { + // TODO: If we decided to blame ourselves (or one of our channels) in + // process_onion_failure we should close that channel as it implies our + // next-hop is needlessly blaming us! + if let Some(scid) = short_channel_id { + retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); + } + events::Event::PaymentPathFailed { + payment_id: Some(payment_id), + payment_hash: payment_hash.clone(), + rejected_by_dest: !payment_retryable, + network_update, + all_paths_failed, + path: path.clone(), + short_channel_id, + retry, + #[cfg(test)] + error_code: onion_error_code, + #[cfg(test)] + error_data: onion_error_data + } } }, &HTLCFailReason::Reason { @@ -3773,6 +3987,8 @@ impl ChannelMana // ChannelDetails. // TODO: For non-temporary failures, we really should be closing the // channel here as we apparently can't relay through them anyway. + let scid = path.first().unwrap().short_channel_id; + retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); events::Event::PaymentPathFailed { payment_id: Some(payment_id), payment_hash: payment_hash.clone(), @@ -3780,7 +3996,7 @@ impl ChannelMana network_update: None, all_paths_failed, path: path.clone(), - short_channel_id: Some(path.first().unwrap().short_channel_id), + short_channel_id: Some(scid), retry, #[cfg(test)] error_code: Some(*failure_code), @@ -3793,7 +4009,7 @@ impl ChannelMana pending_events.push(path_failure); if let Some(ev) = full_failure_ev { pending_events.push(ev); } }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. 
}) => { + HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => { let err_packet = match onion_error { HTLCFailReason::Reason { failure_code, data } => { log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); @@ -3825,12 +4041,16 @@ impl ChannelMana } } mem::drop(channel_state_lock); + let mut pending_events = self.pending_events.lock().unwrap(); if let Some(time) = forward_event { - let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::PendingHTLCsForwardable { time_forwardable: time }); } + pending_events.push(events::Event::HTLCHandlingFailed { + prev_channel_id: outpoint.to_channel_id(), + failed_next_destination: destination + }); }, } } @@ -3878,7 +4098,7 @@ impl ChannelMana let mut expected_amt_msat = None; let mut valid_mpp = true; for htlc in sources.iter() { - if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) { + if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) { valid_mpp = false; break; } @@ -3922,7 +4142,8 @@ impl ChannelMana self.best_block.read().unwrap().height())); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }); + HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash } ); } else { match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { @@ -3967,8 +4188,8 @@ impl ChannelMana fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
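// The `HTLCHandlingFailed` event pushed above pairs the channel the HTLC
// arrived over (`prev_channel_id`, recovered from the funding outpoint) with
// where it was headed when it failed (`failed_next_destination`). A minimal,
// self-contained sketch of that pairing -- the enum below mirrors the two
// variants this diff uses, but with plain byte arrays standing in for the
// real `PublicKey`/`PaymentHash` types:

use std::collections::HashMap;

#[derive(Debug)]
enum HtlcDestination {
    /// The HTLC was being forwarded and the named next-hop channel rejected it.
    NextHopChannel { node_id: Option<[u8; 33]>, channel_id: [u8; 32] },
    /// The HTLC terminated at us and the payment itself failed.
    FailedPayment { payment_hash: [u8; 32] },
}

// With the destination attached, a consumer can attribute failures without
// parsing logs, e.g. counting how often each next-hop channel bounces forwards:
fn bounce_counts(events: &[([u8; 32], HtlcDestination)]) -> HashMap<[u8; 32], u32> {
    let mut counts = HashMap::new();
    for (_prev_channel_id, destination) in events {
        if let HtlcDestination::NextHopChannel { channel_id, .. } = destination {
            *counts.entry(*channel_id).or_insert(0) += 1;
        }
    }
    counts
}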
let channel_state = &mut **channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { - Some(chan_id) => chan_id.clone(), + let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { + Some((_cp_id, chan_id)) => chan_id.clone(), None => { return ClaimFundsFromHop::PrevHopForceClosed } @@ -4015,7 +4236,7 @@ impl ChannelMana payment_preimage, e); } let counterparty_node_id = chan.get().get_counterparty_node_id(); - let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id); + let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); if drop { chan.remove_entry(); } @@ -4166,7 +4387,7 @@ impl ChannelMana let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let chan_restoration_res; - let (mut pending_failures, finalized_claims) = { + let (mut pending_failures, finalized_claims, counterparty_node_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { @@ -4177,6 +4398,7 @@ impl ChannelMana return; } + let counterparty_node_id = channel.get().get_counterparty_node_id(); let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height()); let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() { // We only send a channel_update in the case where we are just now sending a @@ -4195,12 +4417,14 @@ impl ChannelMana if let Some(upd) = channel_update { channel_state.pending_msg_events.push(upd); } - (updates.failed_htlcs, updates.finalized_claimed_htlcs) + + (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id) }; post_handle_chan_restoration!(self, chan_restoration_res); self.finalize_claims(finalized_claims); for failure in pending_failures.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver); } } @@ -4349,7 +4573,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.peer_channel_config_limits, &their_features), channel_state, chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) @@ -4414,12 +4638,23 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(e) => { + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + match id_to_peer.entry(chan.channel_id()) { + hash_map::Entry::Occupied(_) => { + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(), + funding_msg.channel_id)) + }, + hash_map::Entry::Vacant(i_e) => { + i_e.insert(chan.get_counterparty_node_id()); + } + } channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { node_id: counterparty_node_id.clone(), msg: funding_msg, }); if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg); + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); } e.insert(chan); } @@ -4454,7 +4689,7 @@ impl ChannelMana return res } if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg); + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); } funding_tx }, @@ -4527,7 +4762,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { remove_channel!(self, channel_state, chan_entry); break result; @@ -4548,7 +4783,8 @@ impl ChannelMana } }; for htlc_source in dropped_htlcs.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -4609,7 +4845,8 @@ impl ChannelMana //encrypted with the same key. It's not immediately obvious how to usefully exploit that, //but we should prevent it anyway. - let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); + let pending_forward_info = self.decode_update_add_htlc_onion(msg); + let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; match channel_state.by_id.entry(msg.channel_id) { @@ -4828,13 +5065,14 @@ impl ChannelMana hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } }; - self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id); + self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); match res { Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs, short_channel_id, channel_outpoint)) => { for failure in pending_failures.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver); } self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); self.finalize_claims(finalized_claim_htlcs); @@ -4889,8 +5127,8 @@ impl ChannelMana fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) { - Some(chan_id) => chan_id.clone(), + let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) { + Some((_cp_id, chan_id)) => chan_id.clone(), None => { // It's not a local channel return Ok(NotifyOption::SkipPersist) @@ -4968,7 +5206,7 @@ impl ChannelMana } }; post_handle_chan_restoration!(self, chan_restoration_res); - self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id); + self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id); if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; @@ -4981,7 +5219,7 @@ impl ChannelMana let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) { + for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { for monitor_event in monitor_events.drain(..) 
{ match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { @@ -4990,7 +5228,8 @@ impl ChannelMana self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | @@ -5059,19 +5298,23 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let by_id = &mut channel_state.by_id; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; by_id.retain(|channel_id, chan| { match chan.maybe_free_holding_cell_htlcs(&self.logger) { Ok((commitment_opt, holding_cell_failed_htlcs)) => { if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push((holding_cell_failed_htlcs, *channel_id)); + failed_htlcs.push(( + holding_cell_failed_htlcs, + *channel_id, + chan.get_counterparty_node_id() + )); } if let Some((commitment_update, monitor_update)) = commitment_opt { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); + let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); handle_errors.push((chan.get_counterparty_node_id(), res)); if close_channel { return false; } } else { @@ -5084,7 +5327,7 @@ impl ChannelMana true }, Err(e) => { - let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); // ChannelClosed event is generated by handle_error for us !close_channel @@ -5094,8 +5337,8 @@ impl ChannelMana } let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); - for (failures, channel_id) in failed_htlcs.drain(..) { - self.fail_holding_cell_htlcs(failures, channel_id); + for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) { + self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id); } for (counterparty_node_id, err) in handle_errors.drain(..) 
{ @@ -5115,7 +5358,7 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let by_id = &mut channel_state.by_id; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; by_id.retain(|channel_id, chan| { @@ -5140,13 +5383,13 @@ impl ChannelMana log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transaction(&tx); - update_maps_on_chan_removal!(self, short_to_id, chan); + update_maps_on_chan_removal!(self, short_to_chan_info, chan); false } else { true } }, Err(e) => { has_update = true; - let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); !close_channel } @@ -5341,7 +5584,7 @@ impl ChannelMana loop { let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); // Ensure the generated scid doesn't conflict with a real channel. - match channel_state.short_to_id.entry(scid_candidate) { + match channel_state.short_to_chan_info.entry(scid_candidate) { hash_map::Entry::Occupied(_) => continue, hash_map::Entry::Vacant(_) => return scid_candidate } @@ -5576,7 +5819,7 @@ where fn get_relevant_txids(&self) -> Vec { let channel_state = self.channel_state.lock().unwrap(); - let mut res = Vec::with_capacity(channel_state.short_to_id.len()); + let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len()); for chan in channel_state.by_id.values() { if let Some(funding_txo) = chan.get_funding_txo() { res.push(funding_txo.txid); @@ -5619,7 +5862,7 @@ where { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; channel_state.by_id.retain(|_, channel| { let res = f(channel); @@ -5628,10 +5871,10 @@ where let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { failure_code, data, - })); + }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); } if let Some(channel_ready) = channel_ready_opt { - send_channel_ready!(short_to_id, pending_msg_events, channel, channel_ready); + send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); if channel.is_usable() { log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { @@ -5664,18 +5907,19 @@ where if channel.is_our_channel_ready() { if let Some(real_scid) = channel.get_short_channel_id() { // If we sent a 0conf channel_ready, and now have an SCID, we add it - // to the short_to_id map here. Note that we check whether we can relay - // using the real SCID at relay-time (i.e. enforce option_scid_alias - // then), and if the funding tx is ever un-confirmed we force-close the - // channel, ensuring short_to_id is always consistent. 
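// `short_to_chan_info` now maps an SCID (or alias) to *both* the counterparty
// and the channel id, so registering a 0conf channel's real SCID must be
// idempotent: re-inserting may only ever observe the exact same tuple, which
// is the invariant the surrounding assert enforces. Simplified sketch, with
// byte arrays standing in for the real `PublicKey`/channel-id types:

use std::collections::HashMap;

type NodeId = [u8; 33];
type ChannelId = [u8; 32];

fn register_real_scid(
    short_to_chan_info: &mut HashMap<u64, (NodeId, ChannelId)>,
    scid: u64,
    counterparty: NodeId,
    channel_id: ChannelId,
) {
    let previous = short_to_chan_info.insert(scid, (counterparty, channel_id));
    // Either the SCID was unused, or it already pointed at this exact
    // (counterparty, channel) pair -- anything else is a collision.
    assert!(previous.is_none() || previous == Some((counterparty, channel_id)),
        "SCIDs should never collide");
}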
- let scid_insert = short_to_id.insert(real_scid, channel.channel_id()); - assert!(scid_insert.is_none() || scid_insert.unwrap() == channel.channel_id(), + // to the short_to_chan_info map here. Note that we check whether we + // can relay using the real SCID at relay-time (i.e. + // enforce option_scid_alias then), and if the funding tx is ever + // un-confirmed we force-close the channel, ensuring short_to_chan_info + // is always consistent. + let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", fake_scid::MAX_SCID_BLOCKS_FROM_NOW); } } } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, short_to_id, channel); + update_maps_on_chan_removal!(self, short_to_chan_info, channel); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. failed_channels.push(channel.force_shutdown(true)); @@ -5708,10 +5952,11 @@ where if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height)); + timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data - })); + }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); false } else { true } }); @@ -5722,8 +5967,8 @@ where self.handle_init_event_channel_failures(failed_channels); - for (source, payment_hash, reason) in timed_out_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason); + for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination); } } @@ -5866,14 +6111,14 @@ impl let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. 
We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); channel_state.by_id.retain(|_, chan| { if chan.get_counterparty_node_id() == *counterparty_node_id { chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { - update_maps_on_chan_removal!(self, short_to_id, chan); + update_maps_on_chan_removal!(self, short_to_chan_info, chan); self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { @@ -5965,7 +6210,7 @@ impl for chan in self.list_channels() { if chan.counterparty.node_id == *counterparty_node_id { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data)); + let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true); } } } else { @@ -5987,7 +6232,7 @@ impl } // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data)); + let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true); } } } @@ -6088,6 +6333,7 @@ impl_writeable_tlv_based!(ChannelDetails, { (4, counterparty, required), (5, outbound_scid_alias, option), (6, funding_txo, option), + (7, config, option), (8, short_channel_id, option), (10, channel_value_satoshis, required), (12, unspendable_punishment_reserve, option), @@ -6096,7 +6342,7 @@ impl_writeable_tlv_based!(ChannelDetails, { (18, outbound_capacity_msat, required), // Note that by the time we get past the required read above, outbound_capacity_msat will be // filled in, so we can safely unwrap it here. 
- (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())), + (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), (20, inbound_capacity_msat, required), (22, confirmations_required, option), (24, force_close_spend_delay, option), @@ -6529,6 +6775,7 @@ impl Writeable f (5, self.our_network_pubkey, required), (7, self.fake_scid_rand_bytes, required), (9, htlc_purposes, vec_type), + (11, self.probing_cookie_secret, required), }); Ok(()) @@ -6672,7 +6919,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let channel_count: u64 = Readable::read(reader)?; let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); - let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = Vec::new(); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, (&args.keys_manager, best_block_height))?; @@ -6712,7 +6960,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } else { log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id())); if let Some(short_channel_id) = channel.get_short_channel_id() { - short_to_id.insert(short_channel_id, channel.channel_id()); + short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id())); + } + if channel.is_funding_initiated() { + id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id()); } by_id.insert(channel.channel_id(), channel); } @@ -6825,6 +7076,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut pending_outbound_payments = None; let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; + let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), @@ -6832,11 +7084,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> (5, received_network_pubkey, option), (7, fake_scid_rand_bytes, option), (9, claimable_htlc_purposes, vec_type), + (11, probing_cookie_secret, option), }); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes()); } + if probing_cookie_secret.is_none() { + probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes()); + } + if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { pending_outbound_payments = Some(pending_outbound_payments_compat); } else if pending_outbound_payments.is_none() { @@ -6971,7 +7228,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> return Err(DecodeError::InvalidValue); } if chan.is_usable() { - if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() { + if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. 
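// Both fields added in this diff -- `config` at type 7 on `ChannelDetails`
// and `probing_cookie_secret` at type 11 on the manager -- are odd-numbered
// TLVs: in the BOLT-style TLV convention these serialization macros follow,
// an unknown *even* type must fail the read while an unknown *odd* type may
// be skipped, which is what lets pre-0.0.109 readers tolerate the additions.
// On the read side, a missing secret is simply regenerated. Toy sketches of
// both compatibility rules (not LDK's actual wire layout):

fn on_unknown_tlv(tlv_type: u64) -> Result<(), &'static str> {
    if tlv_type % 2 == 0 {
        // Even: the writer considered this field critical to understand.
        Err("unknown even TLV type: abort the read")
    } else {
        // Odd: skip the value and keep reading.
        Ok(())
    }
}

// `fresh` is a hypothetical stand-in for `keys_manager.get_secure_random_bytes()`.
fn load_probing_secret(read: Option<[u8; 32]>, fresh: impl FnOnce() -> [u8; 32]) -> [u8; 32] {
    // A manager serialized before the field existed gets a fresh secret; old
    // in-flight probes become unverifiable, but new ones work immediately.
    read.unwrap_or_else(fresh)
}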
log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); @@ -6980,6 +7237,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } + let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator); + for (_, monitor) in args.channel_monitors.iter() { for (payment_hash, payment_preimage) in monitor.get_stored_preimages() { if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) { @@ -7008,7 +7267,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger); } if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) { - previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger); + previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger); } } pending_events_read.push(events::Event::PaymentClaimed { @@ -7022,7 +7281,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let channel_manager = ChannelManager { genesis_hash, - fee_estimator: args.fee_estimator, + fee_estimator: bounded_fee_estimator, chain_monitor: args.chain_monitor, tx_broadcaster: args.tx_broadcaster, @@ -7030,7 +7289,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel_state: Mutex::new(ChannelHolder { by_id, - short_to_id, + short_to_chan_info, forward_htlcs, claimable_htlcs, pending_msg_events: Vec::new(), @@ -7040,8 +7299,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), + id_to_peer: Mutex::new(id_to_peer), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), + probing_cookie_secret: probing_cookie_secret.unwrap(), + our_network_key, our_network_pubkey, secp_ctx, @@ -7062,7 +7324,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> }; for htlc_source in failed_htlcs.drain(..) 
{ - channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -7087,7 +7351,7 @@ mod tests { use ln::msgs::ChannelMessageHandler; use routing::router::{PaymentParameters, RouteParameters, find_route}; use util::errors::APIError; - use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use util::test_utils; use chain::keysinterface::KeysInterface; @@ -7250,7 +7514,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7358,8 +7622,8 @@ mod tests { final_cltv_expiry_delta: TEST_FINAL_CLTV, }; let route = find_route( - &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None, - nodes[0].logger, &scorer, &random_seed_bytes + &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph, + None, nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -7370,8 +7634,10 @@ mod tests { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // We have to forward pending HTLCs twice - once tries to forward the payment forward (and + // fails), the second will process the resulting failure and fail the HTLC backward expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7389,8 +7655,8 @@ mod tests { // To start (2), send a keysend payment but don't claim it. 
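// Keysend ("spontaneous") payments carry the preimage to the recipient inside
// the onion, so the sender picks the preimage up front and the payment hash
// is simply its SHA256 -- which is why the test below can fix `[42; 32]` and
// know the hash in advance. Sketch using the `bitcoin::hashes` API this file
// already imports:

fn keysend_payment_hash(payment_preimage: [u8; 32]) -> [u8; 32] {
    use bitcoin::hashes::{sha256, Hash};
    // The recipient recomputes this hash from the preimage found in the
    // onion, so no invoice round-trip is needed before paying.
    sha256::Hash::hash(&payment_preimage).into_inner()
}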
let payment_preimage = PaymentPreimage([42; 32]); let route = find_route( - &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None, - nodes[0].logger, &scorer, &random_seed_bytes + &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph, + None, nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -7412,7 +7678,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7453,7 +7719,7 @@ mod tests { let scorer = test_utils::TestScorer::with_penalty(0); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = find_route( - &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::>()), + &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::>()), nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); @@ -7497,7 +7763,7 @@ mod tests { let scorer = test_utils::TestScorer::with_penalty(0); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = find_route( - &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::>()), + &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::>()), nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); @@ -7576,6 +7842,119 @@ mod tests { // Check that using the original payment hash succeeds. assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok()); } + + #[test] + fn test_id_to_peer_coverage() { + // Test that the `ChannelManager:id_to_peer` contains channels which have been assigned + // a `channel_id` (i.e. have had the funding tx created), and that they are removed once + // the channel is successfully closed. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + + let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let channel_id = &tx.txid().into_inner(); + { + // Ensure that the `id_to_peer` map is empty until either party has received the + // funding transaction, and have the real `channel_id`. 
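// The expected `channel_id` above is derived straight from the funding txid:
// as rust-lightning implements the BOLT 2 rule, the channel id is the funding
// txid with the 16-bit funding output index XORed into its last two bytes, so
// for output index 0 it equals the txid byte-for-byte -- which is what
// `tx.txid().into_inner()` relies on. Sketch of the derivation:

fn channel_id_from_funding(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
    let mut channel_id = funding_txid;
    channel_id[30] ^= (output_index >> 8) as u8;
    channel_id[31] ^= (output_index & 0xff) as u8;
    channel_id
}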
+ assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + + nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + { + // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as + // as it has the funding transaction. + let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_0_lock.len(), 1); + assert!(nodes_0_lock.contains_key(channel_id)); + + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + { + let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_0_lock.len(), 1); + assert!(nodes_0_lock.contains_key(channel_id)); + + // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as + // as it has the funding transaction. + let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + check_added_monitors!(nodes[1], 1); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); + check_added_monitors!(nodes[0], 1); + let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update); + + nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); + let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &nodes_1_shutdown); + + let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0); + { + // Assert that the channel is kept in the `id_to_peer` map for both nodes until the + // channel can be fully closed by both parties (i.e. no outstanding htlcs exists, the + // fee for the closing transaction has been negotiated and the parties has the other + // party's signature for the fee negotiated closing transaction.) + let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_0_lock.len(), 1); + assert!(nodes_0_lock.contains_key(channel_id)); + + // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the + // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature + // from `nodes[0]` for the closing transaction with the proposed fee, the channel is + // kept in the `nodes[1]`'s `id_to_peer` map. 
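// The asymmetry asserted here: each side drops the channel from `id_to_peer`
// only once it holds the counterparty's signature on a `closing_signed` for a
// fee it agreed to -- i.e. only once it could broadcast the closing
// transaction itself. A toy model of that condition (illustrative states,
// not the real `Channel` state machine):

#[derive(PartialEq)]
enum CooperativeClose {
    /// Still exchanging `closing_signed` fee proposals.
    NegotiatingFee,
    /// We hold the peer's signature for the fee we accepted.
    HaveCounterpartySig,
}

fn can_forget_channel(state: &CooperativeClose) -> bool {
    // Until we can broadcast the closing tx, monitor events may still need to
    // resolve this channel id back to its peer, so keep the mapping alive.
    *state == CooperativeClose::HaveCounterpartySig
}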
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); + { + // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and + // therefore has all it needs to fully close the channel (both signatures for the + // closing transaction). + // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be + // fully closed by `nodes[0]`. + assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + + // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]` + // doesn't have `nodes[0]`'s signature for the closing transaction yet. + let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + + let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap()); + { + // Assert that the channel has now been removed from both parties `id_to_peer` map once + // they both have everything required to fully close the channel. + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + + check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); + check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))] @@ -7627,7 +8006,7 @@ pub mod bench { let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; let mut config: UserConfig = Default::default(); - config.own_channel_config.minimum_depth = 1; + config.channel_handshake_config.minimum_depth = 1; let logger_a = test_utils::TestLogger::with_id("node a".to_owned()); let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a); @@ -7690,7 +8069,7 @@ pub mod bench { _ => panic!(), } - let dummy_graph = NetworkGraph::new(genesis_hash); + let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a); let mut payment_count: u64 = 0; macro_rules! send_payment {