X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=9d656d02286f57a5b85b42593b4ac01a397871fd;hb=4cee62233cad5cc80e29208e7e7f633324a4abaf;hp=398f395492f148586ffe15312c6fda58bd1f2cd0;hpb=166f32621d502de13b359bdeccb04d3ef3577f3d;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 398f3954..9d656d02 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch};
+use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
@@ -56,7 +56,7 @@ use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
 use crate::ln::wire::Encode;
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
-use crate::util::config::{UserConfig, ChannelConfig};
+use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::string::UntrustedString;
@@ -112,6 +112,8 @@ pub(super) enum PendingHTLCRouting {
 		phantom_shared_secret: Option<[u8; 32]>,
 	},
 	ReceiveKeysend {
+		/// This was added in 0.0.116 and will break deserialization on downgrades.
+		payment_data: Option<msgs::FinalOnionHopData>,
 		payment_preimage: PaymentPreimage,
 		payment_metadata: Option<Vec<u8>>,
 		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
@@ -129,6 +131,9 @@ pub(super) struct PendingHTLCInfo {
 	/// may overshoot this in either case)
 	pub(super) outgoing_amt_msat: u64,
 	pub(super) outgoing_cltv_value: u32,
+	/// The fee being skimmed off the top of this HTLC. If this is a forward, it'll be the fee we are
+	/// skimming. If we're receiving this HTLC, it's the fee that our counterparty skimmed.
+	pub(super) skimmed_fee_msat: Option<u64>,
 }

 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
@@ -208,6 +213,8 @@ struct ClaimableHTLC {
 	total_value_received: Option<u64>,
 	/// The sender intended sum total of all MPP parts specified in the onion
 	total_msat: u64,
+	/// The extra fee our counterparty skimmed off the top of this HTLC.
+	counterparty_skimmed_fee_msat: Option<u64>,
 }

 /// A payment identifier used to uniquely identify a payment to LDK.
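The skimmed-fee fields added above are simply the delta between what an HTLC delivered to us and what is sent onward. A minimal standalone sketch of that relationship (the helper and the amounts are illustrative, not LDK API):

	// An inbound HTLC carried some amount; if we forward less, the difference is
	// the fee we skim. `saturating_sub` guards against the (invalid) case of
	// forwarding more than was received; `None` means nothing was skimmed.
	fn compute_skimmed_fee(inbound_amt_msat: u64, outgoing_amt_msat: u64) -> Option<u64> {
		match inbound_amt_msat.saturating_sub(outgoing_amt_msat) {
			0 => None,
			fee => Some(fee),
		}
	}

This mirrors how `forward_intercepted_htlc` later in this diff derives `skimmed_fee_msat` via `saturating_sub` when the caller forwards less than the intercepted amount.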
@@ -605,12 +612,22 @@ impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,

 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
-	/// `temporary_channel_id` or `channel_id` -> `channel`.
+	/// `channel_id` -> `Channel`.
 	///
-	/// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
-	/// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the
-	/// `channel_id`.
+	/// Holds all funded channels where the peer is the counterparty.
 	pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
+	/// `temporary_channel_id` -> `OutboundV1Channel`.
+	///
+	/// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
+	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
+	/// `channel_by_id`.
+	pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<Signer>>,
+	/// `temporary_channel_id` -> `InboundV1Channel`.
+	///
+	/// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
+	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
+	/// `channel_by_id`.
+	pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<Signer>>,
 	/// The latest `InitFeatures` we heard from the peer.
 	latest_features: InitFeatures,
 	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
@@ -652,6 +669,20 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
 		}
 		self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
 	}
+
+	// Returns a count of all channels we have with this peer, including pending channels.
+	fn total_channel_count(&self) -> usize {
+		self.channel_by_id.len() +
+			self.outbound_v1_channel_by_id.len() +
+			self.inbound_v1_channel_by_id.len()
+	}
+
+	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
+	fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
+		self.channel_by_id.contains_key(channel_id) ||
+			self.outbound_v1_channel_by_id.contains_key(channel_id) ||
+			self.inbound_v1_channel_by_id.contains_key(channel_id)
+	}
 }

 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
@@ -1373,8 +1404,14 @@ pub struct ChannelDetails {
 	/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
 	/// to use a limit as close as possible to the HTLC limit we can currently send.
 	///
-	/// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+	/// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+	/// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
 	pub next_outbound_htlc_limit_msat: u64,
+	/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+	/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+	/// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+	/// route which is valid.
+	pub next_outbound_htlc_minimum_msat: u64,
 	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
 	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 	/// available for inclusion in new inbound HTLCs).
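Because a channel now lives in exactly one of the three maps above depending on its funding state, code that used to consult `channel_by_id` alone has to consider all three; that is what `total_channel_count` and `has_channel` encapsulate. A hedged sketch of how they might be used from within `channelmanager.rs` (the cap constant is hypothetical, not an LDK constant):

	const MAX_CHANNELS_PER_PEER: usize = 8; // illustrative per-peer limit

	fn can_open_another_channel<Signer: ChannelSigner>(peer: &PeerState<Signer>) -> bool {
		// Counts funded channels plus pending V1 channels that still only
		// have a temporary_channel_id.
		peer.total_channel_count() < MAX_CHANNELS_PER_PEER
	}

	fn known_channel_id<Signer: ChannelSigner>(peer: &PeerState<Signer>, channel_id: &[u8; 32]) -> bool {
		// Checks all three maps, so temporary IDs of unfunded channels are found too.
		peer.has_channel(channel_id)
	}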
@@ -1458,53 +1495,54 @@ impl ChannelDetails {
 		self.short_channel_id.or(self.outbound_scid_alias)
 	}

-	fn from_channel<Signer: WriteableEcdsaChannelSigner>(channel: &Channel<Signer>,
+	fn from_channel_context<Signer: WriteableEcdsaChannelSigner>(context: &ChannelContext<Signer>,
 		best_block_height: u32, latest_features: InitFeatures) -> Self {
-		let balance = channel.get_available_balances();
+		let balance = context.get_available_balances();
 		let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
-			channel.get_holder_counterparty_selected_channel_reserve_satoshis();
+			context.get_holder_counterparty_selected_channel_reserve_satoshis();
 		ChannelDetails {
-			channel_id: channel.channel_id(),
+			channel_id: context.channel_id(),
 			counterparty: ChannelCounterparty {
-				node_id: channel.get_counterparty_node_id(),
+				node_id: context.get_counterparty_node_id(),
 				features: latest_features,
 				unspendable_punishment_reserve: to_remote_reserve_satoshis,
-				forwarding_info: channel.counterparty_forwarding_info(),
+				forwarding_info: context.counterparty_forwarding_info(),
 				// Ensures that we have actually received the `htlc_minimum_msat` value
 				// from the counterparty through the `OpenChannel` or `AcceptChannel`
 				// message (as they are always the first message from the counterparty).
 				// Else `Channel::get_counterparty_htlc_minimum_msat` could return the
 				// default `0` value set by `Channel::new_outbound`.
-				outbound_htlc_minimum_msat: if channel.have_received_message() {
-					Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
-				outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+				outbound_htlc_minimum_msat: if context.have_received_message() {
+					Some(context.get_counterparty_htlc_minimum_msat()) } else { None },
+				outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(),
 			},
-			funding_txo: channel.get_funding_txo(),
+			funding_txo: context.get_funding_txo(),
 			// Note that accept_channel (or open_channel) is always the first message, so
 			// `have_received_message` indicates that type negotiation has completed.
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, - short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, - inbound_scid_alias: channel.latest_inbound_scid_alias(), - channel_value_satoshis: channel.get_value_satoshis(), - feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), + channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None }, + short_channel_id: context.get_short_channel_id(), + outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None }, + inbound_scid_alias: context.latest_inbound_scid_alias(), + channel_value_satoshis: context.get_value_satoshis(), + feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, balance_msat: balance.balance_msat, inbound_capacity_msat: balance.inbound_capacity_msat, outbound_capacity_msat: balance.outbound_capacity_msat, next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, - user_channel_id: channel.get_user_id(), - confirmations_required: channel.minimum_depth(), - confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), - force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), - inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), - inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), - config: Some(channel.config()), + next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat, + user_channel_id: context.get_user_id(), + confirmations_required: context.minimum_depth(), + confirmations: Some(context.get_funding_tx_confirmations(best_block_height)), + force_close_spend_delay: context.get_counterparty_selected_contest_delay(), + is_outbound: context.is_outbound(), + is_channel_ready: context.is_usable(), + is_usable: context.is_live(), + is_public: context.should_announce(), + inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()), + inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(), + config: Some(context.config()), } } } @@ -1601,14 +1639,23 @@ macro_rules! handle_error { Err(err) }, } - } } + } }; + ($self: ident, $internal: expr) => { + match $internal { + Ok(res) => Ok(res), + Err((chan, msg_handle_err)) => { + let counterparty_node_id = chan.get_counterparty_node_id(); + handle_error!($self, Err(msg_handle_err), counterparty_node_id).map_err(|err| (chan, err)) + }, + } + }; } macro_rules! update_maps_on_chan_removal { - ($self: expr, $channel: expr) => {{ - $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); + ($self: expr, $channel_context: expr) => {{ + $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id()); let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $channel.get_short_channel_id() { + if let Some(short_id) = $channel_context.get_short_channel_id() { short_to_chan_info.remove(&short_id); } else { // If the channel was never confirmed on-chain prior to its closure, remove the @@ -1617,10 +1664,10 @@ macro_rules! 
update_maps_on_chan_removal { // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. - let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias()); debug_assert!(alias_removed); } - short_to_chan_info.remove(&$channel.outbound_scid_alias()); + short_to_chan_info.remove(&$channel_context.outbound_scid_alias()); }} } @@ -1636,12 +1683,25 @@ macro_rules! convert_chan_err { }, ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); - update_maps_on_chan_removal!($self, $channel); - let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + update_maps_on_chan_removal!($self, &$channel.context); + let shutdown_res = $channel.context.force_shutdown(true); + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, } + }; + ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, PREFUNDED) => { + match $err { + // We should only ever have `ChannelError::Close` when prefunded channels error. + // In any case, just close the channel. + ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => { + log_error!($self.logger, "Closing prefunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg); + update_maps_on_chan_removal!($self, &$channel_context); + let shutdown_res = $channel_context.force_shutdown(false); + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(), + shutdown_res, None)) + }, + } } } @@ -1660,6 +1720,21 @@ macro_rules! break_chan_entry { } } +macro_rules! try_v1_outbound_chan_entry { + ($self: ident, $res: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(e) => { + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), PREFUNDED); + if drop { + $entry.remove_entry(); + } + return Err(res); + } + } + } +} + macro_rules! try_chan_entry { ($self: ident, $res: expr, $entry: expr) => { match $res { @@ -1679,7 +1754,7 @@ macro_rules! remove_channel { ($self: expr, $entry: expr) => { { let channel = $entry.remove_entry().1; - update_maps_on_chan_removal!($self, channel); + update_maps_on_chan_removal!($self, &channel.context); channel } } @@ -1688,18 +1763,18 @@ macro_rules! remove_channel { macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { - node_id: $channel.get_counterparty_node_id(), + node_id: $channel.context.get_counterparty_node_id(), msg: $channel_ready_msg, }); // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. 
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id())); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), + let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); - if let Some(real_scid) = $channel.get_short_channel_id() { - let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id())); - assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()), + if let Some(real_scid) = $channel.context.get_short_channel_id() { + let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); } }} @@ -1707,30 +1782,30 @@ macro_rules! send_channel_ready { macro_rules! emit_channel_pending_event { ($locked_events: expr, $channel: expr) => { - if $channel.should_emit_channel_pending_event() { + if $channel.context.should_emit_channel_pending_event() { $locked_events.push_back((events::Event::ChannelPending { - channel_id: $channel.channel_id(), - former_temporary_channel_id: $channel.temporary_channel_id(), - counterparty_node_id: $channel.get_counterparty_node_id(), - user_channel_id: $channel.get_user_id(), - funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(), + channel_id: $channel.context.channel_id(), + former_temporary_channel_id: $channel.context.temporary_channel_id(), + counterparty_node_id: $channel.context.get_counterparty_node_id(), + user_channel_id: $channel.context.get_user_id(), + funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(), }, None)); - $channel.set_channel_pending_event_emitted(); + $channel.context.set_channel_pending_event_emitted(); } } } macro_rules! emit_channel_ready_event { ($locked_events: expr, $channel: expr) => { - if $channel.should_emit_channel_ready_event() { - debug_assert!($channel.channel_pending_event_emitted()); + if $channel.context.should_emit_channel_ready_event() { + debug_assert!($channel.context.channel_pending_event_emitted()); $locked_events.push_back((events::Event::ChannelReady { - channel_id: $channel.channel_id(), - user_channel_id: $channel.get_user_id(), - counterparty_node_id: $channel.get_counterparty_node_id(), - channel_type: $channel.get_channel_type().clone(), + channel_id: $channel.context.channel_id(), + user_channel_id: $channel.context.get_user_id(), + counterparty_node_id: $channel.context.get_counterparty_node_id(), + channel_type: $channel.context.get_channel_type().clone(), }, None)); - $channel.set_channel_ready_event_emitted(); + $channel.context.set_channel_ready_event_emitted(); } } } @@ -1740,8 +1815,8 @@ macro_rules! 
handle_monitor_update_completion { let mut updates = $chan.monitor_updating_restored(&$self.logger, &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); - let counterparty_node_id = $chan.get_counterparty_node_id(); - let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() { + let counterparty_node_id = $chan.context.get_counterparty_node_id(); + let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a // channel_update later through the announcement_signatures process for public @@ -1756,7 +1831,7 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.channel_id()).unwrap_or(Vec::new()); + .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); let htlc_forwards = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, @@ -1767,7 +1842,7 @@ macro_rules! handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.channel_id(); + let channel_id = $chan.context.channel_id(); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -1795,16 +1870,16 @@ macro_rules! handle_new_monitor_update { match $update_res { ChannelMonitorUpdateStatus::InProgress => { log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - log_bytes!($chan.channel_id()[..])); + log_bytes!($chan.context.channel_id()[..])); Ok(()) }, ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", - log_bytes!($chan.channel_id()[..])); - update_maps_on_chan_removal!($self, $chan); + log_bytes!($chan.context.channel_id()[..])); + update_maps_on_chan_removal!($self, &$chan.context); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( - "ChannelMonitor storage failure".to_owned(), $chan.channel_id(), - $chan.get_user_id(), $chan.force_shutdown(false), + "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(), + $chan.context.get_user_id(), $chan.context.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok())); $remove; res @@ -2035,7 +2110,7 @@ where let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, + match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias) { @@ -2048,8 +2123,8 @@ where }; let res = channel.get_open_channel(self.genesis_hash.clone()); - let temporary_channel_id = channel.channel_id(); - match peer_state.channel_by_id.entry(temporary_channel_id) { + let temporary_channel_id = channel.context.channel_id(); + match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(fuzzing) { return 
Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -2067,7 +2142,7 @@ where
 		Ok(temporary_channel_id)
 	}

-	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+	fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
 		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
@@ -2082,7 +2157,7 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
-				let details = ChannelDetails::from_channel(channel, best_block_height,
+				let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
 					peer_state.latest_features.clone());
 				res.push(details);
 			}
@@ -2094,7 +2169,37 @@ where
 	/// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
 	/// more information.
 	pub fn list_channels(&self) -> Vec<ChannelDetails> {
-		self.list_channels_with_filter(|_| true)
+		// Allocate our best estimate of the number of channels we have in the `res`
+		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
+		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
+		// unlikely as the `short_to_chan_info` map often contains 2 entries for
+		// the same channel.
+		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
+		{
+			let best_block_height = self.best_block.read().unwrap().height();
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				for (_channel_id, channel) in peer_state.channel_by_id.iter() {
+					let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+						peer_state.latest_features.clone());
+					res.push(details);
+				}
+				for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
+					let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+						peer_state.latest_features.clone());
+					res.push(details);
+				}
+				for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
+					let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+						peer_state.latest_features.clone());
+					res.push(details);
+				}
+			}
+		}
+		res
 	}

 	/// Gets the list of usable channels, in random order. Useful as an argument to
@@ -2107,7 +2212,7 @@ where
 		// Note we use is_live here instead of usable which leads to somewhat confused
 		// internal/external nomenclature, but that's ok cause that's probably what the user
 		// really wanted anyway.
-		self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
+		self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
 	}

 	/// Gets the list of channels we have with a given counterparty, in random order.
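The new `next_outbound_htlc_minimum_msat` field gives routing code a lower bound to pair with the existing upper bound. A sketch of the viability check it enables when picking a first hop (standalone; only the public `ChannelDetails` fields documented above are used):

	fn first_hop_can_carry(details: &ChannelDetails, amt_msat: u64) -> bool {
		// Both bounds reflect current channel state rather than static limits:
		// the minimum roughly tracks the counterparty's htlc_minimum_msat, the
		// limit tracks available outbound capacity and per-HTLC caps.
		amt_msat >= details.next_outbound_htlc_minimum_msat
			&& amt_msat <= details.next_outbound_htlc_limit_msat
	}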
@@ -2122,7 +2227,7 @@ where
 			return peer_state.channel_by_id
 				.iter()
 				.map(|(_, channel)|
-					ChannelDetails::from_channel(channel, best_block_height, features.clone()))
+					ChannelDetails::from_channel_context(&channel.context, best_block_height, features.clone()))
 				.collect();
 		}
 		vec![]
@@ -2157,19 +2262,19 @@ where
 	}

 	/// Helper function that issues the channel close events
-	fn issue_channel_close_events(&self, channel: &Channel<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
+	fn issue_channel_close_events(&self, context: &ChannelContext<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
 		let mut pending_events_lock = self.pending_events.lock().unwrap();
-		match channel.unbroadcasted_funding() {
+		match context.unbroadcasted_funding() {
 			Some(transaction) => {
 				pending_events_lock.push_back((events::Event::DiscardFunding {
-					channel_id: channel.channel_id(), transaction
+					channel_id: context.channel_id(), transaction
 				}, None));
 			},
 			None => {},
 		}
 		pending_events_lock.push_back((events::Event::ChannelClosed {
-			channel_id: channel.channel_id(),
-			user_channel_id: channel.get_user_id(),
+			channel_id: context.channel_id(),
+			user_channel_id: context.get_user_id(),
 			reason: closure_reason
 		}, None));
 	}
@@ -2188,7 +2293,7 @@ where
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(channel_id.clone()) {
 			hash_map::Entry::Occupied(mut chan_entry) => {
-				let funding_txo_opt = chan_entry.get().get_funding_txo();
+				let funding_txo_opt = chan_entry.get().context.get_funding_txo();
 				let their_features = &peer_state.latest_features;
 				let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
 					.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
@@ -2216,7 +2321,7 @@ where
 					msg: channel_update
 				});
 			}
-			self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+			self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
 		}
 		break Ok(());
 	},
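`issue_channel_close_events` queues a `DiscardFunding` event (only when the funding transaction was never broadcast) followed by a `ChannelClosed` event. A sketch of consuming these from an application's event handler, with the variant fields taken from the code above (handler shape assumed, not prescribed by LDK):

	use lightning::events::Event;

	fn handle_close_events(event: Event) {
		match event {
			Event::DiscardFunding { channel_id, .. } => {
				// The funding tx never hit the chain, so its inputs can be reused.
				println!("discarding funding for channel {:?}", channel_id);
			},
			Event::ChannelClosed { channel_id, reason, .. } => {
				// `reason` distinguishes e.g. HolderForceClosed from
				// CounterpartyForceClosed { peer_msg }.
				println!("channel {:?} closed: {:?}", channel_id, reason);
			},
			_ => {},
		}
	}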
channel {}", log_bytes!(channel_id[..])); + self.issue_channel_close_events(&chan.get().context, closure_reason); + let mut chan = remove_channel!(self, chan); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + // Prefunded channel has no update + (None, chan.context.get_counterparty_node_id()) + } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) { + log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + self.issue_channel_close_events(&chan.get().context, closure_reason); + let mut chan = remove_channel!(self, chan); + self.finish_force_close_channel(chan.context.force_shutdown(false)); + // Prefunded channel has no update + (None, chan.context.get_counterparty_node_id()) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); } }; - log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); - self.finish_force_close_channel(chan.force_shutdown(broadcast)); - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + if let Some(update) = update_opt { let mut peer_state = peer_state_mutex.lock().unwrap(); peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - Ok(chan.get_counterparty_node_id()) + Ok(counterparty_node_id) } fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> { @@ -2405,9 +2526,10 @@ where } } - fn construct_recv_pending_htlc_info(&self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], - payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result - { + fn construct_recv_pending_htlc_info( + &self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32], payment_hash: PaymentHash, + amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool + ) -> Result { // final_incorrect_cltv_expiry if hop_data.outgoing_cltv_value > cltv_expiry { return Err(ReceiveError { @@ -2433,7 +2555,7 @@ where msg: "The final CLTV expiry is too soon to handle", }); } - if hop_data.amt_to_forward > amt_msat { + if !allow_underpay && hop_data.amt_to_forward > amt_msat { return Err(ReceiveError { err_code: 19, err_data: amt_msat.to_be_bytes().to_vec(), @@ -2450,20 +2572,7 @@ where }); }, msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => { - if payment_data.is_some() && keysend_preimage.is_some() { - return Err(ReceiveError { - err_code: 0x4000|22, - err_data: Vec::new(), - msg: "We don't support MPP keysend payments", - }); - } else if let Some(data) = payment_data { - PendingHTLCRouting::Receive { - payment_data: data, - payment_metadata, - incoming_cltv_expiry: hop_data.outgoing_cltv_value, - phantom_shared_secret, - } - } else if let Some(payment_preimage) = keysend_preimage { + if let Some(payment_preimage) = keysend_preimage { // We need to check that the sender knows the keysend preimage before processing this // payment further. 
@@ -2450,20 +2572,7 @@ where
 				});
 			},
 			msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => {
-				if payment_data.is_some() && keysend_preimage.is_some() {
-					return Err(ReceiveError {
-						err_code: 0x4000|22,
-						err_data: Vec::new(),
-						msg: "We don't support MPP keysend payments",
-					});
-				} else if let Some(data) = payment_data {
-					PendingHTLCRouting::Receive {
-						payment_data: data,
-						payment_metadata,
-						incoming_cltv_expiry: hop_data.outgoing_cltv_value,
-						phantom_shared_secret,
-					}
-				} else if let Some(payment_preimage) = keysend_preimage {
+				if let Some(payment_preimage) = keysend_preimage {
 					// We need to check that the sender knows the keysend preimage before processing this
 					// payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
 					// could discover the final destination of X, by probing the adjacent nodes on the route
@@ -2477,12 +2586,26 @@ where
 							msg: "Payment preimage didn't match payment hash",
 						});
 					}
-
+					if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() {
+						return Err(ReceiveError {
+							err_code: 0x4000|22,
+							err_data: Vec::new(),
+							msg: "We don't support MPP keysend payments",
+						});
+					}
 					PendingHTLCRouting::ReceiveKeysend {
+						payment_data,
 						payment_preimage,
 						payment_metadata,
 						incoming_cltv_expiry: hop_data.outgoing_cltv_value,
 					}
+				} else if let Some(data) = payment_data {
+					PendingHTLCRouting::Receive {
+						payment_data: data,
+						payment_metadata,
+						incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+						phantom_shared_secret,
+					}
 				} else {
 					return Err(ReceiveError {
 						err_code: 0x4000|0x2000|3,
@@ -2499,15 +2622,18 @@ where
 			incoming_amt_msat: Some(amt_msat),
 			outgoing_amt_msat: hop_data.amt_to_forward,
 			outgoing_cltv_value: hop_data.outgoing_cltv_value,
+			skimmed_fee_msat: None,
 		})
 	}

-	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
+	fn decode_update_add_htlc_onion(
+		&self, msg: &msgs::UpdateAddHTLC
+	) -> Result<(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg> {
 		macro_rules! return_malformed_err {
 			($msg: expr, $err_code: expr) => {
 				{
 					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-					return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+					return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
@@ -2538,7 +2664,7 @@ where
 			($msg: expr, $err_code: expr, $data: expr) => {
 				{
 					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+					return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						reason: HTLCFailReason::reason($err_code, $data.to_vec())
@@ -2557,11 +2683,186 @@ where
 				return_err!(err_msg, err_code, &[0; 0]);
 			},
 		};
+		let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value, next_packet_pk_opt) = match next_hop {
+			onion_utils::Hop::Forward {
+				next_hop_data: msgs::OnionHopData {
+					format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
+					outgoing_cltv_value,
+				}, ..
+			} => {
+				let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+					msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
+				(short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+			},
+			// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
+			// inbound channel's state.
+			onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
+			onion_utils::Hop::Forward {
+				next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, ..
+			} => {
+				return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+			}
+		};
+
+		// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
+		// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
+ if let Some((err, mut code, chan_update)) = loop { + let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned(); + let forwarding_chan_info_opt = match id_option { + None => { // unknown_next_peer + // Note that this is likely a timing oracle for detecting whether an scid is a + // phantom or an intercept. + if (self.default_configuration.accept_intercept_htlcs && + fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) || + fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash) + { + None + } else { + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + } + }, + Some((cp_id, id)) => Some((cp_id.clone(), id.clone())), + }; + let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if peer_state_mutex_opt.is_none() { + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + } + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) { + None => { + // Channel was removed. The short_to_chan_info and channel_by_id maps + // have no consistency guarantees. + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + }, + Some(chan) => chan + }; + if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { + // Note that the behavior here should be identical to the above block - we + // should NOT reveal the existence or non-existence of a private channel if + // we don't allow forwards outbound over them. + break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None)); + } + if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { + // `option_scid_alias` (referred to in LDK as `scid_privacy`) means + // "refuse to forward unless the SCID alias was used", so we pretend + // we don't have the channel here. + break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None)); + } + let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok(); + + // Note that we could technically not return an error yet here and just hope + // that the connection is reestablished or monitor updated by the time we get + // around to doing the actual forward, but better to fail early if we can and + // hopefully an attacker trying to path-trace payments cannot make this occur + // on a small/per-node/per-channel scale. + if !chan.context.is_live() { // channel_disabled + // If the channel_update we're going to return is disabled (i.e. the + // peer has been disabled for some time), return `channel_disabled`, + // otherwise return `temporary_channel_failure`. 
+ if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) { + break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt)); + } else { + break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt)); + } + } + if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum + break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); + } + if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) { + break Some((err, code, chan_update_opt)); + } + chan_update_opt + } else { + if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { + // We really should set `incorrect_cltv_expiry` here but as we're not + // forwarding over a real channel we can't generate a channel_update + // for it. Instead we just return a generic temporary_node_failure. + break Some(( + "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", + 0x2000 | 2, None, + )); + } + None + }; + + let cur_height = self.best_block.read().unwrap().height() + 1; + // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, + // but we want to be robust wrt to counterparty packet sanitization (see + // HTLC_FAIL_BACK_BUFFER rationale). + if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon + break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt)); + } + if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far + break Some(("CLTV expiry is too far in the future", 21, None)); + } + // If the HTLC expires ~now, don't bother trying to forward it to our + // counterparty. They should fail it anyway, but we don't want to bother with + // the round-trips or risk them deciding they definitely want the HTLC and + // force-closing to ensure they get it if we're offline. + // We previously had a much more aggressive check here which tried to ensure + // our counterparty receives an HTLC which has *our* risk threshold met on it, + // but there is no need to do that, and since we're a bit conservative with our + // risk threshold it just results in failing to forward payments. + if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 { + break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt)); + } - let pending_forward_info = match next_hop { + break None; + } + { + let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2)); + if let Some(chan_update) = chan_update { + if code == 0x1000 | 11 || code == 0x1000 | 12 { + msg.amount_msat.write(&mut res).expect("Writes cannot fail"); + } + else if code == 0x1000 | 13 { + msg.cltv_expiry.write(&mut res).expect("Writes cannot fail"); + } + else if code == 0x1000 | 20 { + // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 + 0u16.write(&mut res).expect("Writes cannot fail"); + } + (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail"); + msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail"); + chan_update.write(&mut res).expect("Writes cannot fail"); + } else if code & 0x1000 == 0x1000 { + // If we're trying to return an error that requires a `channel_update` but + // we're forwarding to a phantom or intercept "channel" (i.e. 
cannot
+				// generate an update), just use the generic "temporary_node_failure"
+				// instead.
+				code = 0x2000 | 2;
+			}
+			return_err!(err, code, &res.0[..]);
+		}
+		Ok((next_hop, shared_secret, next_packet_pk_opt))
+	}
+
+	fn construct_pending_htlc_status<'a>(
+		&self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
+		allow_underpay: bool, next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
+	) -> PendingHTLCStatus {
+		macro_rules! return_err {
+			($msg: expr, $err_code: expr, $data: expr) => {
+				{
+					log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+						channel_id: msg.channel_id,
+						htlc_id: msg.htlc_id,
+						reason: HTLCFailReason::reason($err_code, $data.to_vec())
+							.get_encrypted_failure_packet(&shared_secret, &None),
+					}));
+				}
+			}
+		}
 		match decoded_hop {
 			onion_utils::Hop::Receive(next_hop_data) => {
 				// OUR PAYMENT!
-				match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
+				match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
+					msg.amount_msat, msg.cltv_expiry, None, allow_underpay)
+				{
 					Ok(info) => {
 						// Note that we could obviously respond immediately with an update_fulfill_htlc
 						// message, however that would leak that we are the recipient of this payment, so
@@ -2573,10 +2874,10 @@ where
 				}
 			},
 			onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
-				let new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+				debug_assert!(next_packet_pubkey_opt.is_some());
 				let outgoing_packet = msgs::OnionPacket {
 					version: 0,
-					public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret),
+					public_key: next_packet_pubkey_opt.unwrap_or(Err(secp256k1::Error::InvalidPublicKey)),
 					hop_data: new_packet_bytes,
 					hmac: next_hop_hmac.clone(),
 				};
@@ -2598,150 +2899,10 @@ where
 					incoming_amt_msat: Some(msg.amount_msat),
 					outgoing_amt_msat: next_hop_data.amt_to_forward,
 					outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+					skimmed_fee_msat: None,
 				})
 			}
-		};
-
-		if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
-			// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
-			// with a short_channel_id of 0. This is important as various things later assume
-			// short_channel_id is non-0 in any ::Forward.
-			if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-				if let Some((err, mut code, chan_update)) = loop {
-					let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
-					let forwarding_chan_info_opt = match id_option {
-						None => { // unknown_next_peer
-							// Note that this is likely a timing oracle for detecting whether an scid is a
-							// phantom or an intercept.
- if (self.default_configuration.accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash) - { - None - } else { - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - } - }, - Some((cp_id, id)) => Some((cp_id.clone(), id.clone())), - }; - let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); - if peer_state_mutex_opt.is_none() { - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - } - let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); - let peer_state = &mut *peer_state_lock; - let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) { - None => { - // Channel was removed. The short_to_chan_info and channel_by_id maps - // have no consistency guarantees. - break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); - }, - Some(chan) => chan - }; - if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { - // Note that the behavior here should be identical to the above block - we - // should NOT reveal the existence or non-existence of a private channel if - // we don't allow forwards outbound over them. - break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None)); - } - if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() { - // `option_scid_alias` (referred to in LDK as `scid_privacy`) means - // "refuse to forward unless the SCID alias was used", so we pretend - // we don't have the channel here. - break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None)); - } - let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok(); - - // Note that we could technically not return an error yet here and just hope - // that the connection is reestablished or monitor updated by the time we get - // around to doing the actual forward, but better to fail early if we can and - // hopefully an attacker trying to path-trace payments cannot make this occur - // on a small/per-node/per-channel scale. - if !chan.is_live() { // channel_disabled - // If the channel_update we're going to return is disabled (i.e. the - // peer has been disabled for some time), return `channel_disabled`, - // otherwise return `temporary_channel_failure`. 
- if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) { - break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt)); - } else { - break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt)); - } - } - if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt)); - } - if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) { - break Some((err, code, chan_update_opt)); - } - chan_update_opt - } else { - if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { - // We really should set `incorrect_cltv_expiry` here but as we're not - // forwarding over a real channel we can't generate a channel_update - // for it. Instead we just return a generic temporary_node_failure. - break Some(( - "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", - 0x2000 | 2, None, - )); - } - None - }; - - let cur_height = self.best_block.read().unwrap().height() + 1; - // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, - // but we want to be robust wrt to counterparty packet sanitization (see - // HTLC_FAIL_BACK_BUFFER rationale). - if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon - break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt)); - } - if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far - break Some(("CLTV expiry is too far in the future", 21, None)); - } - // If the HTLC expires ~now, don't bother trying to forward it to our - // counterparty. They should fail it anyway, but we don't want to bother with - // the round-trips or risk them deciding they definitely want the HTLC and - // force-closing to ensure they get it if we're offline. - // We previously had a much more aggressive check here which tried to ensure - // our counterparty receives an HTLC which has *our* risk threshold met on it, - // but there is no need to do that, and since we're a bit conservative with our - // risk threshold it just results in failing to forward payments. - if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 { - break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt)); - } - - break None; - } - { - let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2)); - if let Some(chan_update) = chan_update { - if code == 0x1000 | 11 || code == 0x1000 | 12 { - msg.amount_msat.write(&mut res).expect("Writes cannot fail"); - } - else if code == 0x1000 | 13 { - msg.cltv_expiry.write(&mut res).expect("Writes cannot fail"); - } - else if code == 0x1000 | 20 { - // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 - 0u16.write(&mut res).expect("Writes cannot fail"); - } - (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail"); - msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail"); - chan_update.write(&mut res).expect("Writes cannot fail"); - } else if code & 0x1000 == 0x1000 { - // If we're trying to return an error that requires a `channel_update` but - // we're forwarding to a phantom or intercept "channel" (i.e. cannot - // generate an update), just use the generic "temporary_node_failure" - // instead. 
-					code = 0x2000 | 2;
-				}
-				return_err!(err, code, &res.0[..]);
-			}
-		}
-
-		pending_forward_info
 	}

 	/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
@@ -2755,16 +2916,16 @@ where
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
 	fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		if !chan.should_announce() {
+		if !chan.context.should_announce() {
 			return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), action: msgs::ErrorAction::IgnoreError });
 		}
-		if chan.get_short_channel_id().is_none() {
+		if chan.context.get_short_channel_id().is_none() {
 			return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
 		}

-		log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+		log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
 		self.get_channel_update_for_unicast(chan)
 	}

@@ -2780,19 +2941,20 @@ where
 	/// [`channel_update`]: msgs::ChannelUpdate
 	/// [`internal_closing_signed`]: Self::internal_closing_signed
 	fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
-		let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
+		log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+		let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
 			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
 			Some(id) => id,
 		};

 		self.get_channel_update_for_onion(short_channel_id, chan)
 	}
+
 	fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
-		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+		log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+		let were_node_one = self.our_network_pubkey.serialize()[..]
< chan.context.get_counterparty_node_id().serialize()[..]; - let enabled = chan.is_usable() && match chan.channel_update_status() { + let enabled = chan.context.is_usable() && match chan.channel_update_status() { ChannelUpdateStatus::Enabled => true, ChannelUpdateStatus::DisabledStaged(_) => true, ChannelUpdateStatus::Disabled => false, @@ -2802,13 +2964,13 @@ where let unsigned = msgs::UnsignedChannelUpdate { chain_hash: self.genesis_hash, short_channel_id, - timestamp: chan.get_update_time_counter(), + timestamp: chan.context.get_update_time_counter(), flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), - cltv_expiry_delta: chan.get_cltv_expiry_delta(), - htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), - htlc_maximum_msat: chan.get_announced_htlc_max_msat(), - fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), - fee_proportional_millionths: chan.get_fee_proportional_millionths(), + cltv_expiry_delta: chan.context.get_cltv_expiry_delta(), + htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(), + htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(), + fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(), + fee_proportional_millionths: chan.context.get_fee_proportional_millionths(), excess_data: Vec::new(), }; // Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`. @@ -2856,17 +3018,17 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { - if !chan.get().is_live() { + if !chan.get().context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } - let funding_txo = chan.get().get_funding_txo().unwrap(); + let funding_txo = chan.get().context.get_funding_txo().unwrap(); let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { path: path.clone(), session_priv: session_priv.clone(), first_hop_htlc_msat: htlc_msat, payment_id, - }, onion_packet, &self.logger); + }, onion_packet, None, &self.logger); match break_chan_entry!(self, send_res, chan) { Some(monitor_update) => { let update_id = monitor_update.update_id; @@ -3032,8 +3194,6 @@ where /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See /// [`send_payment`] for more information about the risks of duplicate preimage usage. /// - /// Note that `route` must have exactly one path. - /// /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result { let best_block_height = self.best_block.read().unwrap().height(); @@ -3083,7 +3243,7 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. 
-	fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+	fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
 		&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
 	) -> Result<(), APIError> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3092,21 +3252,24 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) {
-			Some(mut chan) => {
+		let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) {
+			Some(chan) => {
 				let funding_txo = find_funding_output(&chan, &funding_transaction)?;

 				let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
-					.map_err(|e| if let ChannelError::Close(msg) = e {
-						MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+					.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
+						let channel_id = chan.context.channel_id();
+						let user_id = chan.context.get_user_id();
+						let shutdown_res = chan.context.force_shutdown(false);
+						(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None))
 					} else { unreachable!(); });
 				match funding_res {
-					Ok(funding_msg) => (funding_msg, chan),
-					Err(_) => {
+					Ok((chan, funding_msg)) => (chan, funding_msg),
+					Err((chan, err)) => {
 						mem::drop(peer_state_lock);
 						mem::drop(per_peer_state);

-						let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+						let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
 						return Err(APIError::ChannelUnavailable {
 							err: "Signer refused to sign the initial commitment transaction".to_owned()
 						});
@@ -3123,16 +3286,16 @@ where
 		};

 		peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-			node_id: chan.get_counterparty_node_id(),
+			node_id: chan.context.get_counterparty_node_id(),
 			msg,
 		});
-		match peer_state.channel_by_id.entry(chan.channel_id()) {
+		match peer_state.channel_by_id.entry(chan.context.channel_id()) {
 			hash_map::Entry::Occupied(_) => {
 				panic!("Generated duplicate funding txid?");
 			},
 			hash_map::Entry::Vacant(e) => {
 				let mut id_to_peer = self.id_to_peer.lock().unwrap();
-				if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+				if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
 					panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
 				}
 				e.insert(chan);
@@ -3208,9 +3371,9 @@ where
 			}

 			let mut output_index = None;
-			let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+			let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
 			for (idx, outp) in tx.output.iter().enumerate() {
-				if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+				if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
 					if output_index.is_some() {
 						return Err(APIError::APIMisuseError {
 							err: "Multiple outputs matched the expected script and value".to_owned()
@@ -3228,7 +3391,7 @@ where
 		})
 	}

-	/// Atomically updates the [`ChannelConfig`] for the given channels.
+	/// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
/// /// Once the updates are applied, each eligible channel (advertised with a known short channel /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`], @@ -3250,10 +3413,10 @@ where /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`ChannelUnavailable`]: APIError::ChannelUnavailable /// [`APIMisuseError`]: APIError::APIMisuseError - pub fn update_channel_config( - &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig, + pub fn update_partial_channel_config( + &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate, ) -> Result<(), APIError> { - if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA { + if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) { return Err(APIError::APIMisuseError { err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA), }); @@ -3274,14 +3437,16 @@ where } for channel_id in channel_ids { let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap(); - if !channel.update_config(config) { + let mut config = channel.context.config(); + config.apply(config_update); + if !channel.context.update_config(&config) { continue; } if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg, }); } @@ -3289,6 +3454,34 @@ where Ok(()) } + /// Atomically updates the [`ChannelConfig`] for the given channels. + /// + /// Once the updates are applied, each eligible channel (advertised with a known short channel + /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`], + /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated + /// containing the new [`ChannelUpdate`] message which should be broadcast to the network. + /// + /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect + /// `counterparty_node_id` is provided. + /// + /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value + /// below [`MIN_CLTV_EXPIRY_DELTA`]. + /// + /// If an error is returned, none of the updates should be considered applied. + /// + /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths + /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat + /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta + /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate + /// [`ChannelUpdate`]: msgs::ChannelUpdate + /// [`ChannelUnavailable`]: APIError::ChannelUnavailable + /// [`APIMisuseError`]: APIError::APIMisuseError + pub fn update_channel_config( + &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig, + ) -> Result<(), APIError> { + return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into()); + } + /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event. 
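
// Usage sketch of `update_partial_channel_config` above (assumes `channel_manager`,
// `counterparty_node_id` and `channel_ids` are in scope, and that a default
// `ChannelConfigUpdate` leaves every field `None`, i.e. unchanged): raise the base
// forwarding fee on a set of channels without touching their other config values.
let config_update = ChannelConfigUpdate {
	forwarding_fee_base_msat: Some(2_000),
	..ChannelConfigUpdate::default()
};
channel_manager.update_partial_channel_config(&counterparty_node_id, &channel_ids, &config_update)
	.expect("channels should exist for this counterparty");

// Usage sketch of `forward_intercepted_htlc`, whose documentation begins just above
// (hypothetical event-loop context; `channel_manager`, `next_hop_channel_id` and
// `next_node_pubkey` are assumed in scope). Forwarding less than
// `expected_outbound_amount_msat` skims the difference as an extra fee, which the new
// `skimmed_fee_msat` accounting below records.
if let Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } = event {
	let amt_to_forward_msat = expected_outbound_amount_msat.saturating_sub(1_000);
	channel_manager.forward_intercepted_htlc(
		intercept_id, &next_hop_channel_id, next_node_pubkey, amt_to_forward_msat)
		.expect("the intercepted HTLC should not have expired");
}
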
/// @@ -3302,13 +3495,16 @@ where /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event. /// /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop - /// you from forwarding more than you received. + /// you from forwarding more than you received. See + /// [`HTLCIntercepted::expected_outbound_amount_msat`] for more on forwarding a different amount + /// than expected. /// /// Errors if the event was not handled in time, in which case the HTLC was automatically failed /// backwards. /// /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted + /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat // TODO: when we move to deciding the best outbound channel at forward time, only take // `next_node_id` and not `next_hop_channel_id` pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> { @@ -3322,15 +3518,16 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { Some(chan) => { - if !chan.is_usable() { + if !chan.context.is_usable() { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) }) } - chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias()) + chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias()) }, None => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id) + err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.", + log_bytes!(*next_hop_channel_id), next_node_id) }) } }; @@ -3346,7 +3543,10 @@ where }, _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted }; + let skimmed_fee_msat = + payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat); let pending_htlc_info = PendingHTLCInfo { + skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) }, outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info }; @@ -3416,7 +3616,7 @@ where prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, - outgoing_cltv_value, incoming_amt_msat: _ + outgoing_cltv_value, .. } }) => { macro_rules! 
failure_handler { @@ -3478,7 +3678,10 @@ where }; match next_hop { onion_utils::Hop::Receive(hop_data) => { - match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) { + match self.construct_recv_pending_htlc_info(hop_data, + incoming_shared_secret, payment_hash, outgoing_amt_msat, + outgoing_cltv_value, Some(phantom_shared_secret), false) + { Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } @@ -3529,7 +3732,7 @@ where prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _, + routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, .. }, }) => { log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); @@ -3543,7 +3746,7 @@ where }); if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat, payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet, &self.logger) + onion_packet, skimmed_fee_msat, &self.logger) { if let ChannelError::Ignore(msg) = e { log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); @@ -3553,7 +3756,7 @@ where let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(failure_code, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id } )); continue; } @@ -3587,7 +3790,8 @@ where HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, forward_info: PendingHTLCInfo { - routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, .. + routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, + skimmed_fee_msat, .. 
} }) => { let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing { @@ -3598,16 +3802,19 @@ where (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret, onion_fields) }, - PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_metadata, incoming_cltv_expiry } => { - let onion_fields = RecipientOnionFields { payment_secret: None, payment_metadata }; + PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => { + let onion_fields = RecipientOnionFields { + payment_secret: payment_data.as_ref().map(|data| data.payment_secret), + payment_metadata + }; (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), - None, None, onion_fields) + payment_data, None, onion_fields) }, _ => { panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); } }; - let mut claimable_htlc = ClaimableHTLC { + let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, outpoint: prev_funding_outpoint, @@ -3625,6 +3832,7 @@ where total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat }, cltv_expiry, onion_payload, + counterparty_skimmed_fee_msat: skimmed_fee_msat, }; let mut committed_to_claimable = false; @@ -3657,13 +3865,11 @@ where } macro_rules! check_total_value { - ($payment_data: expr, $payment_preimage: expr) => {{ + ($purpose: expr) => {{ let mut payment_claimable_generated = false; - let purpose = || { - events::PaymentPurpose::InvoicePayment { - payment_preimage: $payment_preimage, - payment_secret: $payment_data.payment_secret, - } + let is_keysend = match $purpose { + events::PaymentPurpose::SpontaneousPayment(_) => true, + events::PaymentPurpose::InvoicePayment { .. 
} => false, }; let mut claimable_payments = self.claimable_payments.lock().unwrap(); if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { @@ -3675,9 +3881,18 @@ where .or_insert_with(|| { committed_to_claimable = true; ClaimablePayment { - purpose: purpose(), htlcs: Vec::new(), onion_fields: None, + purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None, } }); + if $purpose != claimable_payment.purpose { + let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" }; + log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), log_bytes!(payment_hash.0), log_keysend(!is_keysend)); + fail_htlc!(claimable_htlc, payment_hash); + } + if !self.default_configuration.accept_mpp_keysend && is_keysend && !claimable_payment.htlcs.is_empty() { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash and our config states we don't accept MPP keysend", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc, payment_hash); + } if let Some(earlier_fields) = &mut claimable_payment.onion_fields { if earlier_fields.check_merge(&mut onion_fields).is_err() { fail_htlc!(claimable_htlc, payment_hash); @@ -3686,38 +3901,27 @@ where claimable_payment.onion_fields = Some(onion_fields); } let ref mut htlcs = &mut claimable_payment.htlcs; - if htlcs.len() == 1 { - if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc, payment_hash); - } - } let mut total_value = claimable_htlc.sender_intended_value; let mut earliest_expiry = claimable_htlc.cltv_expiry; for htlc in htlcs.iter() { total_value += htlc.sender_intended_value; earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry); - match &htlc.onion_payload { - OnionPayload::Invoice { .. 
} => { - if htlc.total_msat != $payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", - log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat); - total_value = msgs::MAX_VALUE_MSAT; - } - if total_value >= msgs::MAX_VALUE_MSAT { break; } - }, - _ => unreachable!(), + if htlc.total_msat != claimable_htlc.total_msat { + log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", + log_bytes!(payment_hash.0), claimable_htlc.total_msat, htlc.total_msat); + total_value = msgs::MAX_VALUE_MSAT; } + if total_value >= msgs::MAX_VALUE_MSAT { break; } } // The condition determining whether an MPP is complete must // match exactly the condition used in `timer_tick_occurred` if total_value >= msgs::MAX_VALUE_MSAT { fail_htlc!(claimable_htlc, payment_hash); - } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat { + } else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat { log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); - } else if total_value >= $payment_data.total_msat { + } else if total_value >= claimable_htlc.total_msat { #[allow(unused_assignments)] { committed_to_claimable = true; } @@ -3725,11 +3929,14 @@ where htlcs.push(claimable_htlc); let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum(); htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat)); + let counterparty_skimmed_fee_msat = htlcs.iter() + .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum(); new_events.push_back((events::Event::PaymentClaimable { receiver_node_id: Some(receiver_node_id), payment_hash, - purpose: purpose(), + purpose: $purpose, amount_msat, + counterparty_skimmed_fee_msat, via_channel_id: Some(prev_channel_id), via_user_channel_id: Some(prev_user_channel_id), claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER), @@ -3776,49 +3983,23 @@ where fail_htlc!(claimable_htlc, payment_hash); } } - check_total_value!(payment_data, payment_preimage); + let purpose = events::PaymentPurpose::InvoicePayment { + payment_preimage: payment_preimage.clone(), + payment_secret: payment_data.payment_secret, + }; + check_total_value!(purpose); }, OnionPayload::Spontaneous(preimage) => { - let mut claimable_payments = self.claimable_payments.lock().unwrap(); - if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) { - fail_htlc!(claimable_htlc, payment_hash); - } - match claimable_payments.claimable_payments.entry(payment_hash) { - hash_map::Entry::Vacant(e) => { - let amount_msat = claimable_htlc.value; - claimable_htlc.total_value_received = Some(amount_msat); - let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER); - let purpose = events::PaymentPurpose::SpontaneousPayment(preimage); - e.insert(ClaimablePayment { - purpose: purpose.clone(), - onion_fields: Some(onion_fields.clone()), - htlcs: vec![claimable_htlc], - }); - let prev_channel_id = prev_funding_outpoint.to_channel_id(); - new_events.push_back((events::Event::PaymentClaimable { - receiver_node_id: Some(receiver_node_id), - payment_hash, - amount_msat, - purpose, - via_channel_id: Some(prev_channel_id), - via_user_channel_id: Some(prev_user_channel_id), - claim_deadline, - onion_fields: Some(onion_fields), - }, None)); - }, - 
hash_map::Entry::Occupied(_) => { - log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc, payment_hash); - } - } + let purpose = events::PaymentPurpose::SpontaneousPayment(preimage); + check_total_value!(purpose); } } }, hash_map::Entry::Occupied(inbound_payment) => { - if payment_data.is_none() { + if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload { log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc, payment_hash); - }; + } let payment_data = payment_data.unwrap(); if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); @@ -3828,7 +4009,11 @@ where log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); fail_htlc!(claimable_htlc, payment_hash); } else { - let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage); + let purpose = events::PaymentPurpose::InvoicePayment { + payment_preimage: inbound_payment.get().payment_preimage, + payment_secret: payment_data.payment_secret, + }; + let payment_claimable_generated = check_total_value!(purpose); if payment_claimable_generated { inbound_payment.remove_entry(); } @@ -3928,20 +4113,20 @@ where } fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { - if !chan.is_outbound() { return NotifyOption::SkipPersist; } + if !chan.context.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother - if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { + if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); + log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } - if !chan.is_live() { + if !chan.context.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", - log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); + log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", - log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); + log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.logger); NotifyOption::DoPersist @@ -4015,13 +4200,13 @@ where } match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), - 
ChannelUpdateStatus::DisabledStaged(_) if chan.is_live() + ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), + ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), + ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live() + ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => { + ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => { n += 1; if n >= DISABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Disabled); @@ -4035,7 +4220,7 @@ where chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n)); } }, - ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => { + ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => { n += 1; if n >= ENABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Enabled); @@ -4052,7 +4237,7 @@ where _ => {}, } - chan.maybe_expire_prev_config(); + chan.context.maybe_expire_prev_config(); if chan.should_disconnect_peer_awaiting_response() { log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}", @@ -4212,10 +4397,10 @@ where // guess somewhat. If its a public channel, we figure best to just use the real SCID (as // we're not leaking that we have a channel with the counterparty), otherwise we try to use // an inbound SCID alias before the real SCID. - let scid_pref = if chan.should_announce() { - chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) + let scid_pref = if chan.context.should_announce() { + chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) } else { - chan.latest_inbound_scid_alias().or(chan.get_short_channel_id()) + chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id()) }; if let Some(scid) = scid_pref { self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan) @@ -4408,18 +4593,6 @@ where break; } expected_amt_msat = htlc.total_value_received; - - if let OnionPayload::Spontaneous(_) = &htlc.onion_payload { - // We don't currently support MPP for spontaneous payments, so just check - // that there's one payment here and move on. 
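
// Distilled sketch of the channel_update gossip hysteresis driven by the
// timer_tick_occurred arms above: a channel's advertised status only flips after it has
// been observed live (or not live) for DISABLE_GOSSIP_TICKS / ENABLE_GOSSIP_TICKS
// consecutive ticks, so brief disconnections don't cause gossip flapping. Standalone
// mirror of the logic with illustrative names, not the real types.
#[derive(Clone, Copy)]
enum Status { Enabled, EnabledStaged(u8), Disabled, DisabledStaged(u8) }

fn tick(status: Status, is_live: bool, required_ticks: u8) -> Status {
	match status {
		Status::Enabled if !is_live => Status::DisabledStaged(0),
		Status::Disabled if is_live => Status::EnabledStaged(0),
		// A staged transition is cancelled as soon as the channel flips back.
		Status::DisabledStaged(_) if is_live => Status::Enabled,
		Status::EnabledStaged(_) if !is_live => Status::Disabled,
		// Otherwise count ticks until the staged transition commits.
		Status::DisabledStaged(n) if n + 1 >= required_ticks => Status::Disabled,
		Status::DisabledStaged(n) => Status::DisabledStaged(n + 1),
		Status::EnabledStaged(n) if n + 1 >= required_ticks => Status::Enabled,
		Status::EnabledStaged(n) => Status::EnabledStaged(n + 1),
		unchanged => unchanged,
	}
}
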
-					if sources.len() != 1 {
-						log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
-						debug_assert!(false);
-						valid_mpp = false;
-						break;
-					}
-				}
-
 				claimable_amt_msat += htlc.value;
 			}
 			mem::drop(per_peer_state);
@@ -4489,7 +4662,7 @@ where
 		let mut peer_state_lock = peer_state_opt.unwrap();
 		let peer_state = &mut *peer_state_lock;
 		if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-			let counterparty_node_id = chan.get().get_counterparty_node_id();
+			let counterparty_node_id = chan.get().context.get_counterparty_node_id();
 			let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
 			if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
@@ -4615,7 +4788,7 @@ where
 		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>) -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
 		log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
-			log_bytes!(channel.channel_id()),
+			log_bytes!(channel.context.channel_id()),
 			if raa.is_some() { "an" } else { "no" },
 			if commitment_update.is_some() { "a" } else { "no" },
 			pending_forwards.len(), if funding_broadcastable.is_some() { "" } else { "not " },
@@ -4624,10 +4797,10 @@ where
 		let mut htlc_forwards = None;
-		let counterparty_node_id = channel.get_counterparty_node_id();
+		let counterparty_node_id = channel.context.get_counterparty_node_id();
 		if !pending_forwards.is_empty() {
-			htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
-				channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+			htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
+				channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
 		}
 		if let Some(msg) = channel_ready {
@@ -4709,8 +4882,8 @@ where
 			}
 		};
 		log_trace!(self.logger, "ChannelMonitor updated to {}. 
Current highest is {}", - highest_applied_update_id, channel.get().get_latest_monitor_update_id()); - if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { + highest_applied_update_id, channel.get().context.get_latest_monitor_update_id()); + if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id { return; } handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); @@ -4761,23 +4934,24 @@ where fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty()); + let peers_without_funded_channels = + self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 }); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let is_only_peer_channel = peer_state.channel_by_id.len() == 1; - match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { + let is_only_peer_channel = peer_state.total_channel_count() == 1; + match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut channel) => { - if !channel.get().inbound_is_awaiting_accept() { + if !channel.get().is_awaiting_accept() { return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); } if accept_0conf { channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { + } else if channel.get().context.get_channel_type().requires_zero_conf() { let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } @@ -4791,7 +4965,7 @@ where // channels per-peer we can accept channels from a peer with existing ones. 
if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } } @@ -4803,7 +4977,7 @@ where } peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get().context.get_counterparty_node_id(), msg: channel.get_mut().accept_inbound_channel(user_channel_id), }); } @@ -4829,7 +5003,7 @@ where let peer = peer_mtx.lock().unwrap(); if !maybe_count_peer(&*peer) { continue; } let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height); - if num_unfunded_channels == peer.channel_by_id.len() { + if num_unfunded_channels == peer.total_channel_count() { peers_without_funded_channels += 1; } } @@ -4842,12 +5016,19 @@ where ) -> usize { let mut num_unfunded_channels = 0; for (_, chan) in peer.channel_by_id.iter() { - if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && - chan.get_funding_tx_confirmations(best_block_height) == 0 + // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those + // which have not yet had any confirmations on-chain. + if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 && + chan.context.get_funding_tx_confirmations(best_block_height) == 0 { num_unfunded_channels += 1; } } + for (_, chan) in peer.inbound_v1_channel_by_id.iter() { + if chan.context.minimum_depth().unwrap_or(1) != 0 { + num_unfunded_channels += 1; + } + } num_unfunded_channels } @@ -4868,7 +5049,8 @@ where // Get the number of peers with channels, but without funded ones. We don't care too much // about peers that never open a channel, so we filter by peers that have at least one // channel, and then limit the number of those with unfunded channels. - let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty()); + let channeled_peers_without_funding = + self.peers_without_funded_channels(|node| node.total_channel_count() > 0); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -4882,7 +5064,7 @@ where // If this peer already has some channels, a new channel won't increase our number of peers // with unfunded channels, so as long as we aren't over the maximum number of unfunded // channels per-peer we can accept channels from a peer with existing ones. 
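
// Distilled sketch of the unfunded-channel accounting above (illustrative standalone
// predicate, not the real types): a funded-map channel is held against the
// unfunded-channel DoS limits while it is inbound, non-zero-conf, and its funding
// transaction has no confirmations; every still-pending InboundV1Channel counts too.
fn counts_as_unfunded(is_outbound: bool, minimum_depth: Option<u32>, funding_confs: u32) -> bool {
	!is_outbound && minimum_depth.unwrap_or(1) != 0 && funding_confs == 0
}
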
- if peer_state.channel_by_id.is_empty() && + if peer_state.total_channel_count() == 0 && channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS && !self.default_configuration.manually_accept_inbound_channels { @@ -4898,7 +5080,7 @@ where msg.temporary_channel_id.clone())); } - let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, + let mut channel = match InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias) { @@ -4908,33 +5090,31 @@ where }, Ok(res) => res }; - match peer_state.channel_by_id.entry(channel.channel_id()) { - hash_map::Entry::Occupied(_) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) - }, - hash_map::Entry::Vacant(entry) => { - if !self.default_configuration.manually_accept_inbound_channels { - if channel.get_channel_type().requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); - } - peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(user_channel_id), - }); - } else { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), - counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, - push_msat: msg.push_msat, - channel_type: channel.get_channel_type().clone(), - }, None)); + let channel_id = channel.context.channel_id(); + let channel_exists = peer_state.has_channel(&channel_id); + if channel_exists { + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); + return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) + } else { + if !self.default_configuration.manually_accept_inbound_channels { + if channel.context.get_channel_type().requires_zero_conf() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - - entry.insert(channel); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: counterparty_node_id.clone(), + msg: channel.accept_inbound_channel(user_channel_id), + }); + } else { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::OpenChannelRequest { + temporary_channel_id: msg.temporary_channel_id.clone(), + counterparty_node_id: counterparty_node_id.clone(), + funding_satoshis: msg.funding_satoshis, + push_msat: msg.push_msat, + channel_type: channel.context.get_channel_type().clone(), + }, None)); } + peer_state.inbound_v1_channel_by_id.insert(channel_id, channel); } Ok(()) } @@ -4949,10 +5129,10 @@ where })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + match 
peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) { hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); - (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) + try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); + (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } @@ -4980,12 +5160,24 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let ((funding_msg, monitor), chan) = - match peer_state.channel_by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) + let (chan, funding_msg, monitor) = + match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) { + Some(inbound_chan) => { + match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) { + Ok(res) => res, + Err((mut inbound_chan, err)) => { + // We've already removed this inbound channel from the map in `PeerState` + // above so at this point we just need to clean up any lingering entries + // concerning this channel as it is safe to do so. + update_maps_on_chan_removal!(self, &inbound_chan.context); + let user_id = inbound_chan.context.get_user_id(); + let shutdown_res = inbound_chan.context.force_shutdown(false); + return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err), + msg.temporary_channel_id, user_id, shutdown_res, None)); + }, + } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) + None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) }; match peer_state.channel_by_id.entry(funding_msg.channel_id) { @@ -4993,14 +5185,14 @@ where Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(e) => { - match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) { + match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) { hash_map::Entry::Occupied(_) => { return Err(MsgHandleErrInternal::send_err_msg_no_close( "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(i_e) => { - i_e.insert(chan.get_counterparty_node_id()); + i_e.insert(chan.context.get_counterparty_node_id()); } } @@ -5050,7 +5242,7 @@ where hash_map::Entry::Occupied(mut chan) => { let monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan); - let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor); + let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor); let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan); if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { // We weren't able to watch the channel to begin with, so no updates should be made on @@ -5080,18 +5272,18 @@ where let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer, self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan); if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id())); peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); - } else if chan.get().is_usable() { + } else if chan.get().context.is_usable() { // If we're sending an announcement_signatures, we'll send the (public) // channel_update after sending a channel_announcement when we receive our // counterparty's announcement_signatures. Thus, we only bother to send a // channel_update here if the channel is not public, i.e. we're not sending an // announcement_signatures. 
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); + log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), @@ -5131,7 +5323,7 @@ where if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); } - let funding_txo_opt = chan_entry.get().get_funding_txo(); + let funding_txo_opt = chan_entry.get().context.get_funding_txo(); let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry); dropped_htlcs = htlcs; @@ -5209,7 +5401,7 @@ where msg: update }); } - self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure); + self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); } Ok(()) } @@ -5224,7 +5416,7 @@ where //encrypted with the same key. It's not immediately obvious how to usefully exploit that, //but we should prevent it anyway. - let pending_forward_info = self.decode_update_add_htlc_onion(msg); + let decoded_hop_res = self.decode_update_add_htlc_onion(msg); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -5236,6 +5428,12 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { + let pending_forward_info = match decoded_hop_res { + Ok((next_hop, shared_secret, next_packet_pk_opt)) => + self.construct_pending_htlc_status(msg, shared_secret, next_hop, + chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt), + Err(e) => PendingHTLCStatus::Fail(e) + }; let create_pending_htlc_status = |chan: &Channel<::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| { // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just @@ -5337,7 +5535,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().get_funding_txo(); + let funding_txo = chan.get().context.get_funding_txo(); let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan); if let Some(monitor_update) = monitor_update_opt { let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); @@ -5476,7 +5674,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().get_funding_txo(); + let funding_txo = chan.get().context.get_funding_txo(); let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); let res = if let Some(monitor_update) = monitor_update_opt { let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); @@ -5522,7 +5720,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_usable() { + if !chan.get().context.is_usable() { 
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } @@ -5559,8 +5757,8 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - if chan.get().should_announce() { + if chan.get().context.get_counterparty_node_id() != *counterparty_node_id { + if chan.get().context.should_announce() { // If the announcement is about a channel of ours which is public, some // other peer may simply be forwarding all its gossip to us. Don't provide // a scary-looking error message and return Ok instead. @@ -5568,7 +5766,7 @@ where } return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } - let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..]; + let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..]; let msg_from_node_one = msg.contents.flags & 1 == 0; if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); @@ -5609,18 +5807,18 @@ where node_id: counterparty_node_id.clone(), msg, }); - } else if chan.get().is_usable() { + } else if chan.get().context.is_usable() { // If the channel is in a usable state (ie the channel is not being shut // down), send a unicast channel_update to our counterparty to make sure // they have the latest channel parameters. if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { channel_update = Some(events::MessageSendEvent::SendChannelUpdate { - node_id: chan.get().get_counterparty_node_id(), + node_id: chan.get().context.get_counterparty_node_id(), msg, }); } } - let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); + let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take(); htlc_forwards = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); @@ -5683,7 +5881,7 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { let mut chan = remove_channel!(self, chan_entry); - failed_channels.push(chan.force_shutdown(false)); + failed_channels.push(chan.context.force_shutdown(false)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -5694,11 +5892,11 @@ where } else { ClosureReason::CommitmentTxConfirmed }; - self.issue_channel_close_events(&chan, reason); + self.issue_channel_close_events(&chan.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() } + msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() } }, }); } @@ -5747,8 +5945,8 @@ where let mut peer_state_lock = 
peer_state_mutex.lock().unwrap(); let peer_state: &mut PeerState<_> = &mut *peer_state_lock; for (channel_id, chan) in peer_state.channel_by_id.iter_mut() { - let counterparty_node_id = chan.get_counterparty_node_id(); - let funding_txo = chan.get_funding_txo(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + let funding_txo = chan.context.get_funding_txo(); let (monitor_opt, holding_cell_failed_htlcs) = chan.maybe_free_holding_cell_htlcs(&self.logger); if !holding_cell_failed_htlcs.is_empty() { @@ -5807,7 +6005,7 @@ where if let Some(msg) = msg_opt { has_update = true; pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: chan.get_counterparty_node_id(), msg, + node_id: chan.context.get_counterparty_node_id(), msg, }); } if let Some(tx) = tx_opt { @@ -5819,18 +6017,18 @@ where }); } - self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); - update_maps_on_chan_removal!(self, chan); + update_maps_on_chan_removal!(self, &chan.context); false } else { true } }, Err(e) => { has_update = true; let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id); - handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + handle_errors.push((chan.context.get_counterparty_node_id(), Err(res))); !close_channel } } @@ -6161,7 +6359,7 @@ where } if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) { - debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint); + debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() { log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor", log_bytes!(&channel_funding_outpoint.to_channel_id()[..])); @@ -6417,7 +6615,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for chan in peer_state.channel_by_id.values() { - if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) { + if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) { res.push((funding_txo.txid, Some(block_hash))); } } @@ -6429,7 +6627,7 @@ where let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist }); self.do_chain_event(None, |channel| { - if let Some(funding_txo) = channel.get_funding_txo() { + if let Some(funding_txo) = channel.context.get_funding_txo() { if funding_txo.txid == *txid { channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None)) } else { Ok((None, Vec::new(), None)) } @@ -6472,20 +6670,20 @@ where for (source, payment_hash) in timed_out_pending_htlcs.drain(..) 
{ let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), - HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); + HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() })); } if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); - if channel.is_usable() { - log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + if channel.context.is_usable() { + log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg, }); } } else { - log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id())); } } @@ -6495,9 +6693,9 @@ where } if let Some(announcement_sigs) = announcement_sigs { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id())); pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), msg: announcement_sigs, }); if let Some(height) = height_opt { @@ -6512,7 +6710,7 @@ where } } if channel.is_our_channel_ready() { - if let Some(real_scid) = channel.get_short_channel_id() { + if let Some(real_scid) = channel.context.get_short_channel_id() { // If we sent a 0conf channel_ready, and now have an SCID, we add it // to the short_to_chan_info map here. Note that we check whether we // can relay using the real SCID at relay-time (i.e. @@ -6520,28 +6718,28 @@ where // un-confirmed we force-close the channel, ensuring short_to_chan_info // is always consistent. let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); - let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); - assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), + let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()), "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", fake_scid::MAX_SCID_BLOCKS_FROM_NOW); } } } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, channel); + update_maps_on_chan_removal!(self, &channel.context); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. 
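
// Usage sketch of the chain-driven entry points around this code (assumes
// `channel_manager` plus a `header`, `height` and `txdata` from the connected block are
// in scope): block data flows in through the Confirm-style methods shown earlier, which
// ultimately run do_chain_event above.
channel_manager.transactions_confirmed(&header, &txdata, height);
channel_manager.best_block_updated(&header, height);
// On startup or after a reorg, re-check every transaction the manager still cares about,
// calling transaction_unconfirmed(&txid) for any that were reorged out (sketch).
for (txid, _conf_block_hash) in channel_manager.get_relevant_txids() {
	// ... query the chain source for `txid` here ...
}
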
- failed_channels.push(channel.force_shutdown(true)); + failed_channels.push(channel.context.force_shutdown(true)); if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } let reason_message = format!("{}", reason); - self.issue_channel_close_events(channel, reason); + self.issue_channel_close_events(&channel.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: channel.get_counterparty_node_id(), + node_id: channel.context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { - channel_id: channel.channel_id(), + channel_id: channel.context.channel_id(), data: reason_message, } }, }); @@ -6787,12 +6985,22 @@ where peer_state.channel_by_id.retain(|_, chan| { chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); if chan.is_shutdown() { - update_maps_on_chan_removal!(self, chan); - self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); + update_maps_on_chan_removal!(self, &chan.context); + self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); return false; } true }); + peer_state.inbound_v1_channel_by_id.retain(|_, chan| { + update_maps_on_chan_removal!(self, &chan.context); + self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); + false + }); + peer_state.outbound_v1_channel_by_id.retain(|_, chan| { + update_maps_on_chan_removal!(self, &chan.context); + self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); + false + }); pending_msg_events.retain(|msg| { match msg { // V1 Channel Establishment @@ -6874,6 +7082,8 @@ where } e.insert(Mutex::new(PeerState { channel_by_id: HashMap::new(), + outbound_v1_channel_by_id: HashMap::new(), + inbound_v1_channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), monitor_update_blocked_actions: BTreeMap::new(), @@ -6907,8 +7117,8 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { - let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { - if !chan.have_received_message() { + let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id { + if !chan.context.have_received_message() { // If we created this (outbound) channel while we were disconnected from the // peer we probably failed to send the open_channel message, which is now // lost. 
We can't have had anything pending related to this channel, so we just @@ -6916,13 +7126,13 @@ where false } else { pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.get_counterparty_node_id(), + node_id: chan.context.get_counterparty_node_id(), msg: chan.get_channel_reestablish(&self.logger), }); true } } else { true }; - if retain && chan.get_counterparty_node_id() != *counterparty_node_id { + if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id { if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) { if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { @@ -6949,7 +7159,9 @@ where if peer_state_mutex_opt.is_none() { return; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - peer_state.channel_by_id.keys().cloned().collect() + peer_state.channel_by_id.keys().cloned() + .chain(peer_state.outbound_v1_channel_by_id.keys().cloned()) + .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect() }; for channel_id in channel_ids { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel @@ -6963,7 +7175,7 @@ where if peer_state_mutex_opt.is_none() { return; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { + if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) { if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: *counterparty_node_id, @@ -7140,10 +7352,9 @@ impl Writeable for ChannelDetails { (14, user_channel_id_low, required), (16, self.balance_msat, required), (18, self.outbound_capacity_msat, required), - // Note that by the time we get past the required read above, outbound_capacity_msat will be - // filled in, so we can safely unwrap it here. - (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)), + (19, self.next_outbound_htlc_limit_msat, required), (20, self.inbound_capacity_msat, required), + (21, self.next_outbound_htlc_minimum_msat, required), (22, self.confirmations_required, option), (24, self.force_close_spend_delay, option), (26, self.is_outbound, required), @@ -7180,6 +7391,7 @@ impl Readable for ChannelDetails { // filled in, so we can safely unwrap it here. 
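
// Compatibility note on the ChannelDetails TLV entries here: both changed fields use
// odd TLV types (19 is now written as `required`, 21 is newly added), and LDK's
// serialization convention lets readers skip unknown odd types, so older versions can
// still deserialize these ChannelDetails; on read, an absent type 21 falls back to the
// default of 0 below.
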
@@ -7180,6 +7391,7 @@ impl Readable for ChannelDetails {
 			// filled in, so we can safely unwrap it here.
 			(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
 			(20, inbound_capacity_msat, required),
+			(21, next_outbound_htlc_minimum_msat, (default_value, 0)),
 			(22, confirmations_required, option),
 			(24, force_close_spend_delay, option),
 			(26, is_outbound, required),
@@ -7213,6 +7425,7 @@ impl Readable for ChannelDetails {
 			balance_msat: balance_msat.0.unwrap(),
 			outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
 			next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+			next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
 			inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
 			confirmations_required,
 			confirmations,
@@ -7249,6 +7462,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 		(0, payment_preimage, required),
 		(2, incoming_cltv_expiry, required),
 		(3, payment_metadata, option),
+		(4, payment_data, option), // Added in 0.0.116
 	},
 ;);
@@ -7259,6 +7473,7 @@ impl_writeable_tlv_based!(PendingHTLCInfo, {
 	(6, outgoing_amt_msat, required),
 	(8, outgoing_cltv_value, required),
 	(9, incoming_amt_msat, option),
+	(10, skimmed_fee_msat, option),
 });
@@ -7357,6 +7572,7 @@ impl Writeable for ClaimableHTLC {
 			(5, self.total_value_received, option),
 			(6, self.cltv_expiry, required),
 			(8, keysend_preimage, option),
+			(10, self.counterparty_skimmed_fee_msat, option),
 		});
 		Ok(())
 	}
@@ -7364,24 +7580,19 @@ impl Writeable for ClaimableHTLC {
 impl Readable for ClaimableHTLC {
 	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
-		let mut prev_hop = crate::util::ser::RequiredWrapper(None);
-		let mut value = 0;
-		let mut sender_intended_value = None;
-		let mut payment_data: Option<msgs::FinalOnionHopData> = None;
-		let mut cltv_expiry = 0;
-		let mut total_value_received = None;
-		let mut total_msat = None;
-		let mut keysend_preimage: Option<PaymentPreimage> = None;
-		read_tlv_fields!(reader, {
+		_init_and_read_tlv_fields!(reader, {
 			(0, prev_hop, required),
 			(1, total_msat, option),
-			(2, value, required),
+			(2, value_ser, required),
 			(3, sender_intended_value, option),
-			(4, payment_data, option),
+			(4, payment_data_opt, option),
 			(5, total_value_received, option),
 			(6, cltv_expiry, required),
-			(8, keysend_preimage, option)
+			(8, keysend_preimage, option),
+			(10, counterparty_skimmed_fee_msat, option),
 		});
+		let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
+		let value = value_ser.0.unwrap();
 		let onion_payload = match keysend_preimage {
 			Some(p) => {
 				if payment_data.is_some() {
@@ -7410,7 +7621,8 @@ impl Readable for ClaimableHTLC {
 			total_value_received,
 			total_msat: total_msat.unwrap(),
 			onion_payload,
-			cltv_expiry,
+			cltv_expiry: cltv_expiry.0.unwrap(),
+			counterparty_skimmed_fee_msat,
 		})
 	}
 }
@@ -7548,7 +7760,7 @@ where
 			}
 			number_of_channels += peer_state.channel_by_id.len();
 			for (_, channel) in peer_state.channel_by_id.iter() {
-				if !channel.is_funding_initiated() {
+				if !channel.context.is_funding_initiated() {
 					unfunded_channels += 1;
 				}
 			}
@@ -7560,7 +7772,7 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for (_, channel) in peer_state.channel_by_id.iter() {
-				if channel.is_funding_initiated() {
+				if channel.context.is_funding_initiated() {
 					channel.write(writer)?;
 				}
 			}
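Serialization note on the `ChannelDetails` hunks above: the new `next_outbound_htlc_minimum_msat` field is written under the odd TLV type 21 but read back with `(default_value, 0)`, which keeps the struct compatible in both directions: old readers may skip the unknown odd record, and new readers substitute 0 when reading data written before the field existed. A plain-Rust sketch of that read-side rule only; the `BTreeMap` stream and the helper are hypothetical stand-ins for the generated TLV macro code:

use std::collections::BTreeMap;

// Hypothetical decoded TLV stream: type number -> value.
fn read_u64_with_default(tlvs: &BTreeMap<u64, u64>, tlv_type: u64, default: u64) -> u64 {
	// An absent optional record falls back to the declared default.
	tlvs.get(&tlv_type).copied().unwrap_or(default)
}

fn main() {
	// Data written by a version that predates TLV type 21:
	let old: BTreeMap<u64, u64> = BTreeMap::new();
	assert_eq!(read_u64_with_default(&old, 21, 0), 0);
	// Data written by the current version:
	let new: BTreeMap<u64, u64> = [(21u64, 1_000u64)].into_iter().collect();
	assert_eq!(read_u64_with_default(&new, 21, 0), 1_000);
}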
@@ -7846,7 +8058,7 @@ where
 	pub default_config: UserConfig,
 
 	/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
-	/// value.get_funding_txo() should be the key).
+	/// value.context.get_funding_txo() should be the key).
 	///
 	/// If a monitor is inconsistent with the channel state during deserialization the channel will
 	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
@@ -7936,14 +8148,14 @@ where
 			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
 			))?;
-			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
 				if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
 					// If the channel is ahead of the monitor, return InvalidValue:
 					log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
+						log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
 					log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 					log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
 					log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -7952,13 +8164,13 @@
 				} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
 						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
 						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
-						channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+						channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
 					// But if the channel is behind of the monitor, close the channel:
 					log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
 					log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+						log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+					let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
 					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
 						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 							counterparty_node_id, funding_txo, update
@@ -7966,8 +8178,8 @@ where
 					}
 					failed_htlcs.append(&mut new_failed_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
-						channel_id: channel.channel_id(),
-						user_channel_id: channel.get_user_id(),
+						channel_id: channel.context.channel_id(),
+						user_channel_id: channel.context.get_user_id(),
 						reason: ClosureReason::OutdatedChannelManager
 					}, None));
 					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
@@ -7985,29 +8197,29 @@ where
 							// backwards leg of the HTLC will simply be rejected.
 							log_info!(args.logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
-								log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
-							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+								log_bytes!(channel.context.channel_id()), log_bytes!(payment_hash.0));
+							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 						}
 					}
 				} else {
 					log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
-						log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(),
+						log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
 						monitor.get_latest_update_id());
 					channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
-					if let Some(short_channel_id) = channel.get_short_channel_id() {
-						short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+					if let Some(short_channel_id) = channel.context.get_short_channel_id() {
+						short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 					}
-					if channel.is_funding_initiated() {
-						id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+					if channel.context.is_funding_initiated() {
+						id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
 					}
-					match peer_channels.entry(channel.get_counterparty_node_id()) {
+					match peer_channels.entry(channel.context.get_counterparty_node_id()) {
 						hash_map::Entry::Occupied(mut entry) => {
 							let by_id_map = entry.get_mut();
-							by_id_map.insert(channel.channel_id(), channel);
+							by_id_map.insert(channel.context.channel_id(), channel);
 						},
 						hash_map::Entry::Vacant(entry) => {
 							let mut by_id_map = HashMap::new();
-							by_id_map.insert(channel.channel_id(), channel);
+							by_id_map.insert(channel.context.channel_id(), channel);
 							entry.insert(by_id_map);
 						}
 					}
@@ -8016,14 +8228,14 @@ where
 				// If we were persisted and shut down while the initial ChannelMonitor persistence
 				// was in-progress, we never broadcasted the funding transaction and can still
 				// safely discard the channel.
-				let _ = channel.force_shutdown(false);
+				let _ = channel.context.force_shutdown(false);
 				channel_closures.push_back((events::Event::ChannelClosed {
-					channel_id: channel.channel_id(),
-					user_channel_id: channel.get_user_id(),
+					channel_id: channel.context.channel_id(),
+					user_channel_id: channel.context.get_user_id(),
 					reason: ClosureReason::DisconnectedPeer,
 				}, None));
 			} else {
-				log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+				log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
 				log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 				log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
 				log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
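The deserialization path above enforces a strict ordering between the two persisted artifacts: a `Channel` ahead of its `ChannelMonitor` aborts the entire load, since the monitor is the security-critical record; one behind the monitor is force-closed with its HTLCs failed back; and a channel whose first monitor persistence never completed is silently discarded. A simplified decision sketch over update ids alone; the real checks above also compare commitment transaction numbers and revocation secrets:

#[derive(Debug, PartialEq)]
enum LoadAction { UseChannel, ForceClose, RefuseToLoad }

fn reconcile(channel_update_id: u64, monitor_update_id: u64) -> LoadAction {
	if channel_update_id > monitor_update_id {
		// Monitor is stale: continuing could mean signing away funds.
		LoadAction::RefuseToLoad
	} else if channel_update_id < monitor_update_id {
		// Manager is stale: close on-chain from the monitor's latest state.
		LoadAction::ForceClose
	} else {
		LoadAction::UseChannel
	}
}

fn main() {
	assert_eq!(reconcile(5, 7), LoadAction::ForceClose);
	assert_eq!(reconcile(7, 7), LoadAction::UseChannel);
	assert_eq!(reconcile(9, 7), LoadAction::RefuseToLoad);
}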
@@ -8075,6 +8287,8 @@ where
 			let peer_pubkey = Readable::read(reader)?;
 			let peer_state = PeerState {
 				channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
+				outbound_v1_channel_by_id: HashMap::new(),
+				inbound_v1_channel_by_id: HashMap::new(),
 				latest_features: Readable::read(reader)?,
 				pending_msg_events: Vec::new(),
 				monitor_update_blocked_actions: BTreeMap::new(),
@@ -8112,7 +8326,7 @@ where
 			let peer_state = peer_mtx.lock().unwrap();
 			for (_, chan) in peer_state.channel_by_id.iter() {
 				for update in chan.uncompleted_unblocked_mon_updates() {
-					if let Some(funding_txo) = chan.get_funding_txo() {
+					if let Some(funding_txo) = chan.context.get_funding_txo() {
 						log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
 							update.update_id, log_bytes!(funding_txo.to_channel_id()));
 						pending_background_events.push(
@@ -8404,25 +8618,25 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
-				if chan.outbound_scid_alias() == 0 {
+				if chan.context.outbound_scid_alias() == 0 {
 					let mut outbound_scid_alias;
 					loop {
 						outbound_scid_alias = fake_scid::Namespace::OutboundAlias
 							.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
 						if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
 					}
-					chan.set_outbound_scid_alias(outbound_scid_alias);
-				} else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+					chan.context.set_outbound_scid_alias(outbound_scid_alias);
+				} else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
 					// Note that in rare cases its possible to hit this while reading an older
 					// channel if we just happened to pick a colliding outbound alias above.
-					log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+					log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
 					return Err(DecodeError::InvalidValue);
 				}
-				if chan.is_usable() {
-					if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+				if chan.context.is_usable() {
+					if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
 						// Note that in rare cases its possible to hit this while reading an older
 						// channel if we just happened to pick a colliding outbound alias above.
-						log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+						log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
 						return Err(DecodeError::InvalidValue);
 					}
 				}
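Alias assignment in the last hunk above is a draw-until-unique loop against a `HashSet`, and a collision on an alias that was already persisted is treated as a corrupt serialization rather than retried. The bare loop, separated from the fake-SCID namespace details (`draw` is a stand-in for the `get_fake_scid` call):

use std::collections::HashSet;

fn assign_unique_alias(used: &mut HashSet<u64>, mut draw: impl FnMut() -> u64) -> u64 {
	loop {
		let candidate = draw();
		// HashSet::insert returns false for a duplicate, so we keep drawing
		// until the alias is unique across every channel seen so far.
		if used.insert(candidate) { return candidate; }
	}
}

fn main() {
	let mut used: HashSet<u64> = [1u64, 2, 3].into_iter().collect();
	let mut n = 0u64;
	let alias = assign_unique_alias(&mut used, || { n += 1; n });
	assert_eq!(alias, 4); // draws 1..=3 collide; 4 is the first free alias
}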
@@ -8582,7 +8796,7 @@ mod tests {
 	use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
 	use crate::util::errors::APIError;
 	use crate::util::test_utils;
-	use crate::util::config::ChannelConfig;
+	use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
 	use crate::sign::EntropySource;
 
 	#[test]
@@ -8791,13 +9005,26 @@ mod tests {
 	#[test]
 	fn test_keysend_dup_payment_hash() {
+		do_test_keysend_dup_payment_hash(false);
+		do_test_keysend_dup_payment_hash(true);
+	}
+
+	fn do_test_keysend_dup_payment_hash(accept_mpp_keysend: bool) {
 		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
 		// outbound regular payment fails as expected.
 		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
 		// fails as expected.
+		// (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
+		//      payment fails as expected. When `accept_mpp_keysend` is false, this tests that we
+		//      reject MPP keysend payments, since in this case where the payment has no payment
+		//      secret, a keysend payment with a duplicate hash is basically an MPP keysend. If
+		//      `accept_mpp_keysend` is true, this tests that we only accept MPP keysends with
+		//      payment secrets and reject otherwise.
 		let chanmon_cfgs = create_chanmon_cfgs(2);
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let mut mpp_keysend_cfg = test_default_channel_config();
+		mpp_keysend_cfg.accept_mpp_keysend = accept_mpp_keysend;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(mpp_keysend_cfg)]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1);
 		let scorer = test_utils::TestScorer::new();
@@ -8809,7 +9036,7 @@ mod tests {
 		// Next, attempt a keysend payment and make sure it fails.
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
+			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
 			final_value_msat: 100_000,
 		};
 		let route = find_route(
@@ -8886,6 +9113,53 @@ mod tests {
 		// Finally, succeed the keysend payment.
 		claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+		// To start (3), send a keysend payment but don't claim it.
+		let payment_id_1 = PaymentId([44; 32]);
+		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let event = events.pop().unwrap();
+		let path = vec![&nodes[1]];
+		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+		// Next, attempt a keysend payment and make sure it fails.
+		let route_params = RouteParameters {
+			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+			final_value_msat: 100_000,
+		};
+		let route = find_route(
+			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+		).unwrap();
+		let payment_id_2 = PaymentId([45; 32]);
+		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), payment_id_2).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = events.drain(..).next().unwrap();
+		let payment_event = SendEvent::from_event(ev);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[1], 0);
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
+		check_added_monitors!(nodes[1], 1);
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], payment_hash, true);
+
+		// Finally, claim the original payment.
+		claim_payment(&nodes[0], &expected_route, payment_preimage);
 	}
 
 	#[test]
@@ -8902,7 +9176,7 @@ mod tests {
 		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
 			final_value_msat: 10_000,
 		};
 		let network_graph = nodes[0].network_graph.clone();
@@ -8935,10 +9209,13 @@ mod tests {
 	#[test]
 	fn test_keysend_msg_with_secret_err() {
-		// Test that we error as expected if we receive a keysend payment that includes a payment secret.
+		// Test that we error as expected if we receive a keysend payment that includes a payment
+		// secret when we don't support MPP keysend.
+		let mut reject_mpp_keysend_cfg = test_default_channel_config();
+		reject_mpp_keysend_cfg.accept_mpp_keysend = false;
 		let chanmon_cfgs = create_chanmon_cfgs(2);
 		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(reject_mpp_keysend_cfg)]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let payer_pubkey = nodes[0].node.get_our_node_id();
@@ -8946,7 +9223,7 @@ mod tests {
 		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
 		let route_params = RouteParameters {
-			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
 			final_value_msat: 10_000,
 		};
 		let network_graph = nodes[0].network_graph.clone();
@@ -9501,6 +9778,62 @@ mod tests {
 		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 	}
+
+	#[test]
+	fn test_update_channel_config() {
+		let chanmon_cfg = create_chanmon_cfgs(2);
+		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+		let mut user_config = test_default_channel_config();
+		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+		let nodes = create_network(2, &node_cfg, &node_chanmgr);
+		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+		let channel = &nodes[0].node.list_channels()[0];
+
+		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 0);
+
+		user_config.channel_config.forwarding_fee_base_msat += 10;
+		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match &events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("expected BroadcastChannelUpdate event"),
+		}
+
+		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 0);
+
+		let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
+		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+			cltv_expiry_delta: Some(new_cltv_expiry_delta),
+			..Default::default()
+		}).unwrap();
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match &events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("expected BroadcastChannelUpdate event"),
+		}
+
+		let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
+		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+			forwarding_fee_proportional_millionths: Some(new_fee),
+			..Default::default()
+		}).unwrap();
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match &events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("expected BroadcastChannelUpdate event"),
+		}
+	}
 }
 
 #[cfg(ldk_bench)]
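The new `test_update_channel_config` exercises the pattern `ChannelConfigUpdate` is built on: a struct of `Option` fields with a `Default` impl, so `..Default::default()` reads as "leave every other setting untouched", and an all-`None` update changes nothing (and, per the assertions above, triggers no `BroadcastChannelUpdate`). A standalone sketch of that partial-update pattern using stand-in config fields:

#[derive(Default)]
struct ConfigUpdate {
	cltv_expiry_delta: Option<u16>,
	forwarding_fee_base_msat: Option<u32>,
}

#[derive(Clone, PartialEq)]
struct Config {
	cltv_expiry_delta: u16,
	forwarding_fee_base_msat: u32,
}

impl Config {
	// Returns true if anything actually changed, mirroring the "only
	// broadcast a channel_update when a value moved" behavior tested above.
	fn apply(&mut self, upd: &ConfigUpdate) -> bool {
		let before = self.clone();
		if let Some(v) = upd.cltv_expiry_delta { self.cltv_expiry_delta = v; }
		if let Some(v) = upd.forwarding_fee_base_msat { self.forwarding_fee_base_msat = v; }
		*self != before
	}
}

fn main() {
	let mut cfg = Config { cltv_expiry_delta: 72, forwarding_fee_base_msat: 1_000 };
	assert!(!cfg.apply(&ConfigUpdate::default())); // no-op: nothing to announce
	assert!(cfg.apply(&ConfigUpdate { cltv_expiry_delta: Some(78), ..Default::default() }));
	assert_eq!(cfg.cltv_expiry_delta, 78);
}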