X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=482c16356314df373e8ea7ee5645f5febbf175d9;hb=f06f9d11365360dc2add96acd7916673ea9ce383;hp=84e500c54d6054ebfd368cf984d027b42cc0aa28;hpb=7eff56b12f1f9cccb80e7b8b8efc69e569f89985;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 84e500c5..482c1635 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -36,8 +36,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use chain;
-use chain::Confirm;
-use chain::Watch;
+use chain::{Confirm, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
@@ -45,7 +44,7 @@ use chain::transaction::{OutPoint, TransactionData};
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -58,7 +57,7 @@ use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEv
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::errors::APIError;
 
 use prelude::*;
@@ -508,34 +507,6 @@ pub struct ChainParameters {
 	pub best_block: BestBlock,
 }
 
-/// The best known block as identified by its hash and height.
-#[derive(Clone, Copy, PartialEq)]
-pub struct BestBlock {
-	block_hash: BlockHash,
-	height: u32,
-}
-
-impl BestBlock {
-	/// Returns the best block from the genesis of the given network.
-	pub fn from_genesis(network: Network) -> Self {
-		BestBlock {
-			block_hash: genesis_block(network).header.block_hash(),
-			height: 0,
-		}
-	}
-
-	/// Returns the best block as identified by the given block hash and height.
-	pub fn new(block_hash: BlockHash, height: u32) -> Self {
-		BestBlock { block_hash, height }
-	}
-
-	/// Returns the best block hash.
-	pub fn block_hash(&self) -> BlockHash { self.block_hash }
-
-	/// Returns the best block height.
-	pub fn height(&self) -> u32 { self.height }
-}
-
 #[derive(Copy, Clone, PartialEq)]
 enum NotifyOption {
 	DoPersist,
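
The removed BestBlock type now lives in the chain module (hence the new `use chain::{Confirm, Watch, BestBlock};` import above). A minimal sketch of building ChainParameters against the relocated type; note the `network` field is assumed here, since this hunk only shows `best_block`:

	// Sketch only: paths follow the imports above; `network` is an assumption.
	use bitcoin::network::constants::Network;
	use lightning::chain::BestBlock;
	use lightning::ln::channelmanager::ChainParameters;

	fn mainnet_params_at_genesis() -> ChainParameters {
		ChainParameters {
			network: Network::Bitcoin,
			// A brand-new node starts from genesis; a restarting node would
			// instead call BestBlock::new(tip_hash, tip_height).
			best_block: BestBlock::from_genesis(Network::Bitcoin),
		}
	}
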
@@ -626,13 +597,13 @@ pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
+// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct ChannelDetails {
 	/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
 	/// thereafter this is the txid of the funding transaction xor the funding transaction output).
@@ -656,29 +627,77 @@ pub struct ChannelDetails {
 	pub counterparty_features: InitFeatures,
 	/// The value, in satoshis, of this channel as it appears in the funding output
 	pub channel_value_satoshis: u64,
+	/// The value, in satoshis, that must always be held in the channel for us. This value ensures
+	/// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
+	/// this value on chain.
+	///
+	/// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
+	///
+	/// This value will be `None` for outbound channels until the counterparty accepts the channel.
+	///
+	/// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
+	pub to_self_reserve_satoshis: Option<u64>,
+	/// The value, in satoshis, that must always be held in the channel for our counterparty. This
+	/// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
+	/// claiming at least this value on chain.
+	///
+	/// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
+	///
+	/// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
+	pub to_remote_reserve_satoshis: u64,
 	/// The user_id passed in to create_channel, or 0 if the channel was inbound.
 	pub user_id: u64,
 	/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
 	/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 	/// available for inclusion in new outbound HTLCs). This further does not include any pending
 	/// outgoing HTLCs which are awaiting some other resolution to be sent.
+	///
+	/// This value is not exact. Due to various in-flight changes, feerate changes, and our
+	/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
+	/// should be able to spend nearly this amount.
 	pub outbound_capacity_msat: u64,
 	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
 	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 	/// available for inclusion in new inbound HTLCs).
 	/// Note that there are some corner cases not fully handled here, so the actual available
 	/// inbound capacity may be slightly higher than this.
+	///
+	/// This value is not exact. Due to various in-flight changes, feerate changes, and our
+	/// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
+	/// However, our counterparty should be able to spend nearly this amount.
 	pub inbound_capacity_msat: u64,
+	/// The number of required confirmations on the funding transaction before the funding will be
+	/// considered "locked". This number is selected by the channel fundee (i.e. us if
+	/// [`is_outbound`] is *not* set), and can be selected for inbound channels with
+	/// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
+	/// [`ChannelHandshakeLimits::max_minimum_depth`].
+	///
+	/// This value will be `None` for outbound channels until the counterparty accepts the channel.
+	///
+	/// [`is_outbound`]: ChannelDetails::is_outbound
+	/// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
+	/// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
+	pub confirmations_required: Option<u32>,
+	/// The number of blocks (after our commitment transaction confirms) that we will need to wait
+	/// until we can claim our funds after we force-close the channel. During this time our
+	/// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
+	/// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
+	/// time to claim our non-HTLC-encumbered funds.
+	///
+	/// This value will be `None` for outbound channels until the counterparty accepts the channel.
+	pub spend_csv_on_our_commitment_funds: Option<u16>,
 	/// True if the channel was initiated (and thus funded) by us.
 	pub is_outbound: bool,
 	/// True if the channel is confirmed, funding_locked messages have been exchanged, and the
 	/// channel is not currently being shut down. `funding_locked` message exchange implies the
 	/// required confirmation count has been reached (and we were connected to the peer at some
-	/// point after the funding transaction received enough confirmations).
+	/// point after the funding transaction received enough confirmations). The required
+	/// confirmation count is provided in [`confirmations_required`].
+	///
+	/// [`confirmations_required`]: ChannelDetails::confirmations_required
 	pub is_funding_locked: bool,
 	/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
-	/// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
-	/// channel is not currently negotiating a shutdown.
+	/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
 	///
 	/// This is a strict superset of `is_funding_locked`.
 	pub is_usable: bool,
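
Reading the new fields from client code, as a rough sketch (`manager` is a hypothetical, fully initialized ChannelManager handle; the Option fields really are None until the counterparty accepts, per the docs above):

	for chan in manager.list_channels() {
		// Our reserve and the confirmation requirement are unset until the
		// counterparty has accepted the channel.
		if let (Some(reserve_sat), Some(confs)) = (chan.to_self_reserve_satoshis, chan.confirmations_required) {
			println!("channel {:?}: {} sat funded, {} sat locked as our reserve, {} confs required",
				chan.channel_id, chan.channel_value_satoshis, reserve_sat, confs);
		}
		// The counterparty's reserve is always known, since we selected it.
		println!("counterparty must hold back {} sat", chan.to_remote_reserve_satoshis);
	}
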
@@ -1147,6 +1166,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		res.reserve(channel_state.by_id.len());
 		for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
 			let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+			let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+				channel.get_holder_counterparty_selected_channel_reserve_satoshis();
 			res.push(ChannelDetails {
 				channel_id: (*channel_id).clone(),
 				funding_txo: channel.get_funding_txo(),
@@ -1154,9 +1175,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				remote_network_id: channel.get_counterparty_node_id(),
 				counterparty_features: InitFeatures::empty(),
 				channel_value_satoshis: channel.get_value_satoshis(),
+				to_self_reserve_satoshis,
+				to_remote_reserve_satoshis,
 				inbound_capacity_msat,
 				outbound_capacity_msat,
 				user_id: channel.get_user_id(),
+				confirmations_required: channel.minimum_depth(),
+				spend_csv_on_our_commitment_funds: channel.get_counterparty_selected_contest_delay(),
 				is_outbound: channel.is_outbound(),
 				is_funding_locked: channel.is_usable(),
 				is_usable: channel.is_live(),
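
Because both capacity fields are documented above as estimates, a payer should leave headroom rather than spend them exactly. A hypothetical helper along those lines:

	// Sketch: prefer a channel whose estimated outbound capacity comfortably
	// exceeds the payment amount (10% headroom is an arbitrary choice here).
	fn pick_outbound_channel(channels: &[ChannelDetails], amt_msat: u64) -> Option<&ChannelDetails> {
		channels.iter().find(|c| {
			// is_usable implies funding is locked and the peer is connected.
			c.is_usable && c.outbound_capacity_msat >= amt_msat.saturating_add(amt_msat / 10)
		})
	}
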
@@ -2654,45 +2679,48 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		};
 
 		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-			let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
 			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-				Ok((msgs, monitor_option)) => {
-					if let Some(monitor_update) = monitor_option {
+				Ok(msgs_monitor_option) => {
+					if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
 						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-							if was_frozen_for_monitor {
-								assert!(msgs.is_none());
-							} else {
-								return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
-							}
+							log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+								"Failed to update channel monitor with preimage {:?}: {:?}",
+								payment_preimage, e);
+							return Err(Some((
+								chan.get().get_counterparty_node_id(),
+								handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+							)));
+						}
+						if let Some((msg, commitment_signed)) = msgs {
+							log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+								log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+								node_id: chan.get().get_counterparty_node_id(),
+								updates: msgs::CommitmentUpdate {
+									update_add_htlcs: Vec::new(),
+									update_fulfill_htlcs: vec![msg],
+									update_fail_htlcs: Vec::new(),
+									update_fail_malformed_htlcs: Vec::new(),
+									update_fee: None,
+									commitment_signed,
+								}
+							});
 						}
-					}
-					if let Some((msg, commitment_signed)) = msgs {
-						log_debug!(self.logger, "Caliming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-							log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: chan.get().get_counterparty_node_id(),
-							updates: msgs::CommitmentUpdate {
-								update_add_htlcs: Vec::new(),
-								update_fulfill_htlcs: vec![msg],
-								update_fail_htlcs: Vec::new(),
-								update_fail_malformed_htlcs: Vec::new(),
-								update_fee: None,
-								commitment_signed,
-							}
-						});
 					}
 					return Ok(())
 				},
-				Err(e) => {
-					// TODO: Do something with e?
-					// This should only occur if we are claiming an HTLC at the same time as the
-					// HTLC is being failed (eg because a block is being connected and this caused
-					// an HTLC to time out). This should, of course, only occur if the user is the
-					// one doing the claiming (as it being a part of a peer claim would imply we're
-					// about to lose funds) and only if the lock in claim_funds was dropped as a
-					// previous HTLC was failed (thus not for an MPP payment).
-					debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
-					return Err(None)
+				Err((e, monitor_update)) => {
+					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+						log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+							"Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+							payment_preimage, e);
+					}
+					let counterparty_node_id = chan.get().get_counterparty_node_id();
+					let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+					if drop {
+						chan.remove_entry();
+					}
+					return Err(Some((counterparty_node_id, res)));
 				},
 			}
 		} else { unreachable!(); }
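
The UpdateFulfillCommitFetch value matched above is defined in ln::channel; inferred from this call site (a sketch of its shape, not the authoritative definition):

	// Rough shape, reconstructed from the usage above.
	enum UpdateFulfillCommitFetch {
		// First-time claim: the preimage must reach the ChannelMonitor via
		// `monitor_update` even when the update_fulfill_htlc/commitment_signed
		// pair cannot be generated yet (in which case `msgs` is None).
		NewClaim {
			monitor_update: ChannelMonitorUpdate,
			msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
		},
		// The HTLC was already claimed; nothing further to do.
		DuplicateClaim {},
	}

Note that even the Err arm above applies its monitor_update before force-closing the channel: landing the preimage in the ChannelMonitor is what secures the claim regardless of what happens to the channel itself.
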
@@ -3346,14 +3374,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		Ok(())
 	}
 
-	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+	/// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
+	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
 		let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
 			Some(chan_id) => chan_id.clone(),
 			None => {
 				// It's not a local channel
-				return Ok(())
+				return Ok(NotifyOption::SkipPersist)
 			}
 		};
 		match channel_state.by_id.entry(chan_id) {
@@ -3363,7 +3392,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					// If the announcement is about a channel of ours which is public, some
 					// other peer may simply be forwarding all its gossip to us. Don't provide
 					// a scary-looking error message and return Ok instead.
-					return Ok(());
+					return Ok(NotifyOption::SkipPersist);
 				}
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
 			},
@@ -3371,11 +3400,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			},
 			hash_map::Entry::Vacant(_) => unreachable!()
 		}
-		Ok(())
+		Ok(NotifyOption::DoPersist)
 	}
 
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-		let (htlcs_failed_forward, chan_restoration_res) = {
+		let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
 
@@ -3396,13 +3425,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 							msg,
 						});
 					}
-					(htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+					let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+					(htlcs_failed_forward, need_lnd_workaround,
+						handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 			}
 		};
 		post_handle_chan_restoration!(self, chan_restoration_res);
 		self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+
+		if let Some(funding_locked_msg) = need_lnd_workaround {
+			self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
+		}
 		Ok(())
 	}
 
@@ -4024,6 +4059,12 @@ where
 		let guard = mtx.lock().unwrap();
 		*guard
 	}
+
+	/// Gets the latest best block which was connected either via the [`chain::Listen`] or
+	/// [`chain::Confirm`] interfaces.
+	pub fn current_best_block(&self) -> BestBlock {
+		self.best_block.read().unwrap().clone()
+	}
 }
 
 impl
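
Using the new accessor from a syncing layer, as a sketch (`manager` is assumed to be an initialized ChannelManager):

	let tip = manager.current_best_block();
	println!("resuming sync after block {} at height {}", tip.block_hash(), tip.height());
	// A syncing layer would now fetch anything after `tip.block_hash()` and
	// feed it back in via the chain::Listen or chain::Confirm interfaces.
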
@@ -4110,8 +4151,13 @@ impl
 	}
 
 	fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+			if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
+				persist
+			} else {
+				NotifyOption::SkipPersist
+			}
+		});
 	}
 
 	fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
@@ -4331,22 +4377,22 @@ const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
 	(0, Forward) => {
-		(0, onion_packet),
-		(2, short_channel_id),
-	}, {}, {},
+		(0, onion_packet, required),
+		(2, short_channel_id, required),
+	},
 	(1, Receive) => {
-		(0, payment_data),
-		(2, incoming_cltv_expiry),
-	}, {}, {}
+		(0, payment_data, required),
+		(2, incoming_cltv_expiry, required),
+	}
 ;);
 
 impl_writeable_tlv_based!(PendingHTLCInfo, {
-	(0, routing),
-	(2, incoming_shared_secret),
-	(4, payment_hash),
-	(6, amt_to_forward),
-	(8, outgoing_cltv_value)
-}, {}, {});
+	(0, routing, required),
+	(2, incoming_shared_secret, required),
+	(4, payment_hash, required),
+	(6, amt_to_forward, required),
+	(8, outgoing_cltv_value, required)
+});
 
 impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
 	(0, Relay),
@@ -4358,60 +4404,58 @@ impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
 );
 
 impl_writeable_tlv_based!(HTLCPreviousHopData, {
-	(0, short_channel_id),
-	(2, outpoint),
-	(4, htlc_id),
-	(6, incoming_packet_shared_secret)
-}, {}, {});
+	(0, short_channel_id, required),
+	(2, outpoint, required),
+	(4, htlc_id, required),
+	(6, incoming_packet_shared_secret, required)
+});
 
 impl_writeable_tlv_based!(ClaimableHTLC, {
-	(0, prev_hop),
-	(2, value),
-	(4, payment_data),
-	(6, cltv_expiry),
-}, {}, {});
+	(0, prev_hop, required),
+	(2, value, required),
+	(4, payment_data, required),
+	(6, cltv_expiry, required),
+});
 
 impl_writeable_tlv_based_enum!(HTLCSource,
 	(0, OutboundRoute) => {
-		(0, session_priv),
-		(2, first_hop_htlc_msat),
-	}, {}, {
-		(4, path),
-	};
+		(0, session_priv, required),
+		(2, first_hop_htlc_msat, required),
+		(4, path, vec_type),
+	}, ;
 	(1, PreviousHopData)
 );
 
 impl_writeable_tlv_based_enum!(HTLCFailReason,
 	(0, LightningError) => {
-		(0, err),
-	}, {}, {},
+		(0, err, required),
+	},
 	(1, Reason) => {
-		(0, failure_code),
-	}, {}, {
-		(2, data),
+		(0, failure_code, required),
+		(2, data, vec_type),
 	},
 ;);
 
 impl_writeable_tlv_based_enum!(HTLCForwardInfo,
 	(0, AddHTLC) => {
-		(0, forward_info),
-		(2, prev_short_channel_id),
-		(4, prev_htlc_id),
-		(6, prev_funding_outpoint),
-	}, {}, {},
+		(0, forward_info, required),
+		(2, prev_short_channel_id, required),
+		(4, prev_htlc_id, required),
+		(6, prev_funding_outpoint, required),
+	},
 	(1, FailHTLC) => {
-		(0, htlc_id),
-		(2, err_packet),
-	}, {}, {},
+		(0, htlc_id, required),
+		(2, err_packet, required),
+	},
 ;);
 
 impl_writeable_tlv_based!(PendingInboundPayment, {
-	(0, payment_secret),
-	(2, expiry_time),
-	(4, user_payment_id),
-	(6, payment_preimage),
-	(8, min_value_msat),
-}, {}, {});
+	(0, payment_secret, required),
+	(2, expiry_time, required),
+	(4, user_payment_id, required),
+	(6, payment_preimage, required),
+	(8, min_value_msat, required),
+});
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
 	where M::Target: chain::Watch<Signer>,
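
The inline `required`/`vec_type` markers replace the old positional `{}, {}` blocks that listed optional and vec-typed fields separately. As a rough illustration of the same macro syntax applied to a hypothetical struct (trait signature and types follow util::ser as imported at the top of the file; even TLV type numbers follow the usual required-to-understand convention):

	struct ExampleData {
		value_msat: u64,
		payment_hash: PaymentHash,
	}

	impl Writeable for ExampleData {
		fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
			write_tlv_fields!(writer, {
				(0, self.value_msat, required),
				(2, self.payment_hash, required),
			});
			Ok(())
		}
	}
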
@@ -4506,7 +4550,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 			session_priv.write(writer)?;
 		}
 
-		write_tlv_fields!(writer, {}, {});
+		write_tlv_fields!(writer, {});
 
 		Ok(())
 	}
@@ -4658,6 +4702,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 					log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 					log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+					log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+					log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
 					return Err(DecodeError::InvalidValue);
 				} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
 						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
@@ -4677,6 +4723,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
 				log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
 				log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+				log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+				log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
 				return Err(DecodeError::InvalidValue);
 			}
 		}
@@ -4759,7 +4807,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			}
 		}
 
-		read_tlv_fields!(reader, {}, {});
+		read_tlv_fields!(reader, {});
 
 		let mut secp_ctx = Secp256k1::new();
 		secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
@@ -4819,6 +4867,9 @@ mod tests {
 	use core::sync::atomic::{AtomicBool, Ordering};
 	use std::thread;
 	use core::time::Duration;
+	use ln::functional_test_utils::*;
+	use ln::features::InitFeatures;
+	use ln::msgs::ChannelMessageHandler;
 
 	#[test]
 	fn test_wait_timeout() {
@@ -4861,6 +4912,53 @@ mod tests {
 			}
 		}
 	}
+
+	#[test]
+	fn test_notify_limits() {
+		// Check that a few cases which don't require persisting a new ChannelManager do not,
+		// in fact, trigger a fresh persistence notification.
+		let chanmon_cfgs = create_chanmon_cfgs(3);
+		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		// Check that the channel info the nodes hold doesn't change too early, even though we
+		// later deliver channel_update messages carrying these new values.
+		chan.0.contents.fee_base_msat *= 2;
+		chan.1.contents.fee_base_msat *= 2;
+		let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
+		let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+
+		// The first two nodes (which opened a channel) should now require fresh persistence
+		assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		// ... but the last node should not.
+		assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		// After persisting the first two nodes they should no longer need fresh persistence.
+		assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+		// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
+		// about the channel.
+		nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
+		nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
+		assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+		// The nodes which are a party to the channel should also ignore messages from unrelated
+		// parties.
+		nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+		nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+		nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+		nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+		assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+		// At this point the channel info given by peers should still be the same.
+		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+	}
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
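
The notification behavior this test pins down is what a background persister keys off. A minimal sketch of such a loop (`manager` is a ChannelManager and `persist_manager` is an application-supplied durable-write routine; both are hypothetical names here):

	loop {
		// Returns true if a persistable update occurred before the timeout.
		if manager.await_persistable_update_timeout(Duration::from_secs(5)) {
			persist_manager(&manager).expect("failed to persist ChannelManager");
		}
		// On false the timeout fired with nothing new to write; the SkipPersist
		// cases exercised by test_notify_limits end up here.
	}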