Merge pull request #2337 from alecchendev/2023-06-watchtower-support
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index fdaa25f69b8ea7df39dc6b246d2f062095071019..7d698a220676b8dfd7c9a0df41b0b09f0799e2e9 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -31,7 +31,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::chain::keysinterface::WriteableEcdsaChannelSigner;
+use crate::sign::WriteableEcdsaChannelSigner;
 use crate::events;
 use crate::events::{Event, EventHandler};
 use crate::util::atomic_counter::AtomicCounter;
@@ -94,6 +94,17 @@ impl MonitorUpdateId {
 ///    [`ChannelMonitorUpdateStatus::PermanentFailure`], in which case the channel will likely be
 ///    closed without broadcasting the latest state. See
 ///    [`ChannelMonitorUpdateStatus::PermanentFailure`] for more details.
+///
+/// Third-party watchtowers may be built as part of an implementation of this trait, with the
+/// advantage that you can control whether to resume channel operation depending on whether an
+/// update has been persisted to a watchtower. For this, you may find the following methods useful:
+/// [`ChannelMonitor::initial_counterparty_commitment_tx`],
+/// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
+/// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
+/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
+///
+/// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
+/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
 pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
        /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
        /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
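For illustration, here is a minimal sketch of that watchtower flow inside `Persist::update_persisted_channel`. The `watchtower` client, `feerate_per_kw`, and `destination_script` fields are hypothetical stand-ins (not part of this PR or of LDK), and error handling plus the actual monitor persistence are elided:

```rust
use bitcoin::Script;
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::MonitorUpdateId;
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::transaction::OutPoint;

// Inside `impl<ChannelSigner: WriteableEcdsaChannelSigner> Persist<ChannelSigner> for YourPersister`:
fn update_persisted_channel(
	&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>,
	monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
) -> ChannelMonitorUpdateStatus {
	if let Some(update) = update {
		// For each new counterparty commitment in this update, pre-sign a justice
		// transaction the watchtower can broadcast if that commitment is revoked
		// and later confirms on-chain.
		for commitment_tx in monitor.counterparty_commitment_txs_from_update(update) {
			let commitment_number = commitment_tx.commitment_number();
			let trusted_tx = commitment_tx.trust();
			if let Some(output_idx) = trusted_tx.revokeable_output_index() {
				let value = trusted_tx.built_transaction().transaction.output[output_idx].value;
				// The justice transaction spends a single input, hence input index 0.
				let justice_tx = trusted_tx
					.build_to_local_justice_tx(self.feerate_per_kw, self.destination_script.clone())
					.and_then(|tx| monitor.sign_to_local_justice_tx(tx, 0, value, commitment_number));
				if let Ok(justice_tx) = justice_tx {
					self.watchtower.send_justice_tx(justice_tx); // hypothetical client call
				}
			}
		}
	}
	// ... persist `monitor` itself here. Returning `InProgress` until the
	// watchtower acknowledges the update lets you gate channel operation on it.
	ChannelMonitorUpdateStatus::Completed
}
```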
@@ -217,8 +228,15 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> Deref for LockedChannelMonitor<
 /// or used independently to monitor channels remotely. See the [module-level documentation] for
 /// details.
 ///
+/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
+/// a force-closed channel via [`rebroadcast_pending_claims`]. This is crucial in preventing certain
+/// classes of pinning attacks, detecting substantial mempool feerate changes between blocks, and
+/// ensuring reliability if broadcasting fails. We recommend invoking it every 30 seconds, or more
+/// frequently if running in an environment with spotty connections, like on mobile.
+///
 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 /// [module-level documentation]: crate::chain::chainmonitor
+/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
 pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
        where C::Target: chain::Filter,
         T::Target: BroadcasterInterface,
@@ -357,8 +375,7 @@ where C::Target: chain::Filter,
        /// claims which are awaiting confirmation.
        ///
        /// Includes the balances from each [`ChannelMonitor`] *except* those included in
-       /// `ignored_channels`, allowing you to filter out balances from channels which are still open
-       /// (and whose balance should likely be pulled from the [`ChannelDetails`]).
+       /// `ignored_channels`.
        ///
        /// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
        /// inclusion in the return value.
@@ -495,7 +512,7 @@ where C::Target: chain::Filter,
                self.event_notifier.notify();
        }
 
-       #[cfg(any(test, fuzzing, feature = "_test_utils"))]
+       #[cfg(any(test, feature = "_test_utils"))]
        pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
                use crate::events::EventsProvider;
                let events = core::cell::RefCell::new(Vec::new());
@@ -513,12 +530,13 @@ where C::Target: chain::Filter,
        pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
                &self, handler: H
        ) {
-               let mut pending_events = Vec::new();
-               for monitor_state in self.monitors.read().unwrap().values() {
-                       pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-               }
-               for event in pending_events {
-                       handler(event).await;
+		// Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
+		// crazy dance to process a monitor's events and only remove them once we've done so.
+               let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
+               for funding_txo in mons_to_process {
+                       let mut ev;
+                       super::channelmonitor::process_events_body!(
+                               self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
                }
        }
 
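As a usage sketch (with the `ChainMonitor`'s type parameters elided; in practice `lightning-background-processor`'s `process_events_async` drives this for you):

```rust
use lightning::events::Event;

// `chain_monitor` is your `ChainMonitor`; any closure returning a future
// satisfies the `H: Fn(Event) -> Future` bound.
chain_monitor.process_pending_events_async(|event| async move {
	match event {
		Event::SpendableOutputs { .. } => { /* sweep the outputs to your wallet */ },
		_ => {},
	}
}).await;
```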
@@ -533,6 +551,20 @@ where C::Target: chain::Filter,
        pub fn get_update_future(&self) -> Future {
                self.event_notifier.get_future()
        }
+
+       /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
+       /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
+       /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
+	/// invoking this every 30 seconds, or more frequently if running in an environment with
+	/// spotty connections, like on mobile.
+       pub fn rebroadcast_pending_claims(&self) {
+               let monitors = self.monitors.read().unwrap();
+		for monitor_holder in monitors.values() {
+                       monitor_holder.monitor.rebroadcast_pending_claims(
+                               &*self.broadcaster, &*self.fee_estimator, &*self.logger
+                       )
+               }
+       }
 }
 
 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
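A background task driving the recommended cadence might look like the following sketch (assuming a tokio runtime; the `BackgroundProcessor` in `lightning-background-processor` already does this if you use it):

```rust
use std::time::Duration;

// `chain_monitor` is an `Arc<ChainMonitor<..>>` moved into the task.
tokio::spawn(async move {
	let mut interval = tokio::time::interval(Duration::from_secs(30));
	loop {
		interval.tick().await;
		// Re-broadcast and fee-bump any claims pending from force-closed channels.
		chain_monitor.rebroadcast_pending_claims();
	}
});
```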
@@ -761,30 +793,13 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L
              L::Target: Logger,
              P::Target: Persist<ChannelSigner>,
 {
-       #[cfg(not(anchors))]
-       /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
-       ///
-       /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
-       /// order to handle these events.
-       ///
-       /// [`SpendableOutputs`]: events::Event::SpendableOutputs
-       fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
-               let mut pending_events = Vec::new();
-               for monitor_state in self.monitors.read().unwrap().values() {
-                       pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-               }
-               for event in pending_events {
-                       handler.handle_event(event);
-               }
-       }
-       #[cfg(anchors)]
        /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
        ///
        /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
        /// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
        /// within each channel. As the confirmation of a commitment transaction may be critical to the
-       /// safety of funds, this method must be invoked frequently, ideally once for every chain tip
-       /// update (block connected or disconnected).
+	/// safety of funds, we recommend invoking this every 30 seconds, or more frequently if
+	/// running in an environment with spotty connections, like on mobile.
        ///
        /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
        /// order to handle these events.
@@ -792,22 +807,16 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L
        /// [`SpendableOutputs`]: events::Event::SpendableOutputs
        /// [`BumpTransaction`]: events::Event::BumpTransaction
        fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
-               let mut pending_events = Vec::new();
                for monitor_state in self.monitors.read().unwrap().values() {
-                       pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-               }
-               for event in pending_events {
-                       handler.handle_event(event);
+                       monitor_state.monitor.process_pending_events(&handler);
                }
        }
 }
 
 #[cfg(test)]
 mod tests {
-       use bitcoin::{BlockHeader, TxMerkleNode};
-       use bitcoin::hashes::Hash;
        use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
-       use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
+       use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
        use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
        use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
        use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
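As with the async variant above, a usage sketch (type parameters elided): since `EventHandler` is implemented for `Fn(Event)` closures, a handler can be passed directly:

```rust
use lightning::events::{Event, EventsProvider};

// Process `SpendableOutputs` and, on anchor channels, `BumpTransaction`
// events on the recommended ~30-second cadence.
chain_monitor.process_pending_events(&|event| match event {
	Event::SpendableOutputs { .. } => { /* sweep the outputs to your wallet */ },
	Event::BumpTransaction(_) => { /* hand to a BumpTransactionEventHandler */ },
	_ => {},
});
```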
@@ -890,7 +899,7 @@ mod tests {
 
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-               expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+               expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                check_added_monitors!(nodes[0], 1);
                let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -903,7 +912,7 @@ mod tests {
                let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
-               expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+               expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
                check_added_monitors!(nodes[0], 1);
                nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
@@ -951,10 +960,7 @@ mod tests {
 
                // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
                // channel is now closed, but the ChannelManager doesn't know that yet.
-               let new_header = BlockHeader {
-                       version: 2, time: 0, bits: 0, nonce: 0,
-                       prev_blockhash: nodes[0].best_block_info().0,
-                       merkle_root: TxMerkleNode::all_zeros() };
+               let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
                nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
                        &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
                assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
@@ -970,7 +976,8 @@ mod tests {
                        assert!(err.contains("ChannelMonitor storage failure")));
                check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
                check_closed_broadcast!(nodes[0], true);
-               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+                       [nodes[1].node.get_our_node_id()], 100000);
 
                // However, as the ChainMonitor is still waiting for the original persistence to complete,
                // it won't yet release the MonitorEvents.
@@ -978,10 +985,7 @@ mod tests {
 
                if block_timeout {
			// After three blocks, pending MonitorEvents should be released either way.
-                       let latest_header = BlockHeader {
-                               version: 2, time: 0, bits: 0, nonce: 0,
-                               prev_blockhash: nodes[0].best_block_info().0,
-                               merkle_root: TxMerkleNode::all_zeros() };
+                       let latest_header = create_dummy_header(nodes[0].best_block_info().0, 0);
                        nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
                } else {
                        let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
@@ -992,7 +996,7 @@ mod tests {
                        }
                }
 
-               expect_payment_sent!(nodes[0], payment_preimage);
+               expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
        }
 
        #[test]
@@ -1020,7 +1024,8 @@ mod tests {
                // ... however once we get events once, the channel will close, creating a channel-closed
                // ChannelMonitorUpdate.
                check_closed_broadcast!(nodes[0], true);
-               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
+               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() },
+                       [nodes[1].node.get_our_node_id()], 100000);
                check_added_monitors!(nodes[0], 1);
        }
 }