Merge pull request #949 from TheBlueMatt/2021-06-send-priv-update
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 5caffe8ee12c01cea64532656ffa84339767fd59..bbd32994e875138ff5574cbb6e45f381e43ebb2a 100644
@@ -36,8 +36,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use chain;
-use chain::Confirm;
-use chain::Watch;
+use chain::{Confirm, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
@@ -508,34 +507,6 @@ pub struct ChainParameters {
        pub best_block: BestBlock,
 }
 
-/// The best known block as identified by its hash and height.
-#[derive(Clone, Copy, PartialEq)]
-pub struct BestBlock {
-       block_hash: BlockHash,
-       height: u32,
-}
-
-impl BestBlock {
-       /// Returns the best block from the genesis of the given network.
-       pub fn from_genesis(network: Network) -> Self {
-               BestBlock {
-                       block_hash: genesis_block(network).header.block_hash(),
-                       height: 0,
-               }
-       }
-
-       /// Returns the best block as identified by the given block hash and height.
-       pub fn new(block_hash: BlockHash, height: u32) -> Self {
-               BestBlock { block_hash, height }
-       }
-
-       /// Returns the best block hash.
-       pub fn block_hash(&self) -> BlockHash { self.block_hash }
-
-       /// Returns the best block height.
-       pub fn height(&self) -> u32 { self.height }
-}
-
 #[derive(Copy, Clone, PartialEq)]
 enum NotifyOption {
        DoPersist,
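
For reference, `BestBlock` has simply moved to the `chain` module (matching the new import at the top of this diff); its API is unchanged. A minimal sketch of constructing `ChainParameters` against the relocated type, assuming the struct's other field (`network`) is untouched by this patch:

```rust
use bitcoin::network::constants::Network;
use lightning::chain::BestBlock; // previously ln::channelmanager::BestBlock
use lightning::ln::channelmanager::ChainParameters;

// Start a fresh node from the genesis block of the given network; the
// constructor and getters are exactly those removed above.
fn fresh_chain_params(network: Network) -> ChainParameters {
	ChainParameters {
		network,
		best_block: BestBlock::from_genesis(network),
	}
}
```
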
@@ -656,25 +627,74 @@ pub struct ChannelDetails {
        pub counterparty_features: InitFeatures,
        /// The value, in satoshis, of this channel as it appears in the funding output
        pub channel_value_satoshis: u64,
+       /// The value, in satoshis, that must always be held in the channel for us. This value ensures
+       /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
+       /// this value on chain.
+       ///
+       /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
+       ///
+       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+       ///
+       /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
+       pub to_self_reserve_satoshis: Option<u64>,
+       /// The value, in satoshis, that must always be held in the channel for our counterparty. This
+       /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
+       /// claiming at least this value on chain.
+       ///
+       /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
+       ///
+       /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
+       pub to_remote_reserve_satoshis: u64,
        /// The user_id passed in to create_channel, or 0 if the channel was inbound.
        pub user_id: u64,
        /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
        /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new outbound HTLCs). This further does not include any pending
        /// outgoing HTLCs which are awaiting some other resolution to be sent.
+       ///
+       /// This value is not exact. Due to various in-flight changes, feerate changes, and our
+       /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
+       /// should be able to spend nearly this amount.
        pub outbound_capacity_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
        /// Note that there are some corner cases not fully handled here, so the actual available
        /// inbound capacity may be slightly higher than this.
+       ///
+       /// This value is not exact. Due to various in-flight changes, feerate changes, and our
+       /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
+       /// However, our counterparty should be able to spend nearly this amount.
        pub inbound_capacity_msat: u64,
+       /// The number of required confirmations on the funding transaction before the funding will be
+       /// considered "locked". This number is selected by the channel fundee (i.e. us if
+       /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
+       /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
+       /// [`ChannelHandshakeLimits::max_minimum_depth`].
+       ///
+       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+       ///
+       /// [`is_outbound`]: ChannelDetails::is_outbound
+       /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
+       /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
+       pub confirmations_required: Option<u32>,
+       /// The number of blocks (after our commitment transaction confirms) that we will need to wait
+       /// until we can claim our funds after we force-close the channel. During this time our
+       /// counterparty is allowed to punish us if we broadcast a stale state. If our counterparty
+       /// force-closes the channel and broadcasts a commitment transaction, we do not have to wait
+       /// any time to claim our non-HTLC-encumbered funds.
+       ///
+       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+       pub spend_csv_on_our_commitment_funds: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
        /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
        /// channel is not currently being shut down. `funding_locked` message exchange implies the
        /// required confirmation count has been reached (and we were connected to the peer at some
-       /// point after the funding transaction received enough confirmations).
+       /// point after the funding transaction received enough confirmations). The required
+       /// confirmation count is provided in [`confirmations_required`].
+       ///
+       /// [`confirmations_required`]: ChannelDetails::confirmations_required
        pub is_funding_locked: bool,
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
        /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
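
Since several of the new fields interact (the reserves are excluded from the capacity figures, and the `Option` fields stay `None` until an outbound channel is accepted), here is a hedged sketch of reading them from one `list_channels()` entry; `describe` is a hypothetical helper, not part of this patch:

```rust
use lightning::ln::channelmanager::ChannelDetails;

// `details` is one entry returned by ChannelManager::list_channels().
fn describe(details: &ChannelDetails) {
	// The reserve can never be spent, so it is already excluded from
	// outbound_capacity_msat rather than double-counted.
	match details.to_self_reserve_satoshis {
		Some(reserve) => println!("our reserve: {} sat", reserve),
		// Outbound channel the counterparty has not yet accepted.
		None => println!("reserve not yet negotiated"),
	}
	// Both capacity values are estimates, not exact spendable amounts.
	println!("can send ~{} msat / receive ~{} msat",
		details.outbound_capacity_msat, details.inbound_capacity_msat);
	if let Some(depth) = details.confirmations_required {
		println!("funding usable after {} confirmations", depth);
	}
	if let Some(csv) = details.spend_csv_on_our_commitment_funds {
		println!("{} block wait to claim funds after our force-close", csv);
	}
}
```
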
@@ -1147,6 +1167,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        res.reserve(channel_state.by_id.len());
                        for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
                                let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+                               let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+                                       channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                                res.push(ChannelDetails {
                                        channel_id: (*channel_id).clone(),
                                        funding_txo: channel.get_funding_txo(),
@@ -1154,9 +1176,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        remote_network_id: channel.get_counterparty_node_id(),
                                        counterparty_features: InitFeatures::empty(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
+                                       to_self_reserve_satoshis,
+                                       to_remote_reserve_satoshis,
                                        inbound_capacity_msat,
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
+                                       confirmations_required: channel.minimum_depth(),
+                                       spend_csv_on_our_commitment_funds: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
                                        is_funding_locked: channel.is_usable(),
                                        is_usable: channel.is_live(),
@@ -1555,8 +1581,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longuer delay.
+                                       // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
+                                       // But, to be safe against policy rejection, we use a longer delay.
                                        if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
                                                break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
@@ -1598,6 +1624,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                action: msgs::ErrorAction::IgnoreError
                        });
                }
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
                self.get_channel_update_for_unicast(chan)
        }
 
@@ -1607,6 +1634,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// provided evidence that they know about the existence of the channel.
        /// May be called with channel_state already locked!
        fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id() {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -2789,7 +2817,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let (mut pending_failures, chan_restoration_res) = {
+               let chan_restoration_res;
+               let mut pending_failures = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -2801,7 +2830,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
-                       (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+                       let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+                               // We only send a channel_update in the case where we are just now sending a
+                               // funding_locked and the channel is in a usable state. Further, we rely on the
+                               // normal announcement_signatures process to send a channel_update for public
+                               // channels, only generating a unicast channel_update if this is a private channel.
+                               Some(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: channel.get().get_counterparty_node_id(),
+                                       msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+                               })
+                       } else { None };
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+                       if let Some(upd) = channel_update {
+                               channel_state.pending_msg_events.push(upd);
+                       }
+                       pending_failures
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                for failure in pending_failures.drain(..) {
@@ -2964,6 +3007,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
+                               } else if chan.get().is_usable() {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                       });
                                }
                                Ok(())
                        },
@@ -3386,7 +3434,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
-                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let msg_from_node_one = msg.contents.flags & 1 == 0;
+                               if were_node_one == msg_from_node_one {
+                                       return Ok(NotifyOption::SkipPersist);
+                               } else {
+                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
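
The new guard encodes the BOLT 7 direction convention: bit 0 of `channel_flags` is 0 when the update originates from `node_1`, the peer whose serialized public key sorts lexicographically first. If that bit agrees with our own position in the channel, the message describes our half of it, i.e. the peer has (spuriously or not) echoed our own update back to us, and there is nothing new to apply or persist. The same comparison in isolation, as a hypothetical helper:

```rust
/// True if a channel_update received from our counterparty actually describes
/// *our* direction of the channel (i.e. it is our own update echoed back).
fn update_is_our_own(our_pk: &[u8; 33], their_pk: &[u8; 33], channel_flags: u8) -> bool {
	// node_1 is whichever peer has the lexicographically lesser pubkey.
	let were_node_one = our_pk[..] < their_pk[..];
	// BOLT 7: a clear direction bit means the message is from node_1.
	let msg_from_node_one = channel_flags & 1 == 0;
	were_node_one == msg_from_node_one
}
```
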
@@ -3394,7 +3448,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-               let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
+               let chan_restoration_res;
+               let (htlcs_failed_forward, need_lnd_workaround) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -3409,15 +3464,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                        let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
                                                try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+                                       let mut channel_update = None;
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg,
                                                });
+                                       } else if chan.get().is_usable() {
+                                               // If the channel is in a usable state (ie the channel is not being shut
+                                               // down), send a unicast channel_update to our counterparty to make sure
+                                               // they have the latest channel parameters.
+                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                       msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                               });
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-                                       (htlcs_failed_forward, need_lnd_workaround,
-                                               handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+                                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
+                                       if let Some(upd) = channel_update {
+                                               channel_state.pending_msg_events.push(upd);
+                                       }
+                                       (htlcs_failed_forward, need_lnd_workaround)
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
@@ -3970,6 +4037,12 @@ where
                                                                node_id: channel.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
+                                               } else if channel.is_usable() {
+                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: channel.get_counterparty_node_id(),
+                                                               msg: self.get_channel_update_for_unicast(channel).unwrap(),
+                                                       });
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
                                                }
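
Anyone draining `pending_msg_events` (in practice the `PeerManager`) now has to route the new `SendChannelUpdate` variant; unlike `BroadcastChannelUpdate`, it must reach only the named peer, since a private channel's update is nobody else's business. A rough sketch, with `send_to_peer` standing in for the caller's transport:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::ChannelUpdate;
use lightning::util::events::MessageSendEvent;

// Hypothetical transport hook; wire this to your peer connection logic.
fn send_to_peer(_node_id: &PublicKey, _msg: &ChannelUpdate) { /* ... */ }

fn route_event(event: &MessageSendEvent) {
	match event {
		// New in this patch: a unicast update for a usable (typically
		// private) channel. Deliver to this counterparty only; never gossip.
		MessageSendEvent::SendChannelUpdate { node_id, msg } => send_to_peer(node_id, msg),
		_ => { /* all other variants are handled as before */ },
	}
}
```
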
@@ -4049,6 +4122,12 @@ where
                let guard = mtx.lock().unwrap();
                *guard
        }
+
+       /// Gets the latest best block which was connected either via the [`chain::Listen`] or
+       /// [`chain::Confirm`] interfaces.
+       pub fn current_best_block(&self) -> BestBlock {
+               self.best_block.read().unwrap().clone()
+       }
 }
 
 impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
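
A quick usage sketch for the new accessor, with `channel_manager` standing in for any constructed `ChannelManager`; the getters are the ones on the relocated `chain::BestBlock`:

```rust
// Cheap to call from any thread: it only takes a short-lived read lock.
let best = channel_manager.current_best_block();
println!("synced to {} at height {}", best.block_hash(), best.height());
```
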
@@ -4209,6 +4288,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                       &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -4942,6 +5022,31 @@ mod tests {
                // At this point the channel info given by peers should still be the same.
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // An earlier version of handle_channel_update didn't check the directionality of the
+               // update message and would always update the local fee info, even if our peer was
+               // (spuriously) forwarding us our own channel_update.
+               let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
+               let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+               let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+
+               // First deliver each peer's own message, checking that the node doesn't need to be
+               // persisted and that its channel info remains the same.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // Finally, deliver the other peer's message, ensuring each node needs to be persisted and
+               // the channel info has updated.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
        }
 }
 
@@ -5042,7 +5147,19 @@ pub mod bench {
                Listen::block_connected(&node_b, &block, 1);
 
                node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
-               node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+               let msg_events = node_a.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               match msg_events[0] {
+                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                               get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+                       },
+                       _ => panic!(),
+               }
+               match msg_events[1] {
+                       MessageSendEvent::SendChannelUpdate { .. } => {},
+                       _ => panic!(),
+               }
 
                let dummy_graph = NetworkGraph::new(genesis_hash);