Merge pull request #1564 from TheBlueMatt/2022-06-panic-on-behind
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 27 Jun 2022 16:34:26 +0000 (09:34 -0700)
committer GitHub <noreply@github.com>
Mon, 27 Jun 2022 16:34:26 +0000 (09:34 -0700)
Panic if we're running with outdated state instead of force-closing

fuzz/src/full_stack.rs
lightning-background-processor/src/lib.rs
lightning-persister/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/util/events.rs

diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index fae14de57f18ec73fffc1613427f05503b6eeba8..cff00669a9d6a0dde486987a1ea3124a165e9fdb 100644
@@ -252,7 +252,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
                        }
 
                        // Force all channels onto the chain (and time out claim txn)
-                       self.manager.force_close_all_channels();
+                       self.manager.force_close_all_channels_broadcasting_latest_txn();
                }
        }
 }
@@ -624,7 +624,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                                let channel_id = get_slice!(1)[0] as usize;
                                if channel_id >= channels.len() { return; }
                                channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
-                               channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
+                               channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
                        },
                        // 15 is above
                        _ => return,
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 10ae69e2fe1791b1edf428dfe47181e40633d800..11dca44bbab3d7b5c91585835ddc02ccf4baa393 100644
@@ -744,7 +744,7 @@ mod tests {
                }
 
                // Force-close the channel.
-               nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
 
                // Check that the force-close updates are persisted.
                check_persisted_data!(nodes[0].node, filepath.clone());
@@ -880,7 +880,7 @@ mod tests {
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
                // Force close the channel and check that the SpendableOutputs event was handled.
-               nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
                let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
                confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
                let event = receiver
diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 28845150c44606f79aced14c8984e72c10317d37..3e32791711e1428746eaadef6f26f11248370439 100644
@@ -213,7 +213,7 @@ mod tests {
 
                // Force close because cooperative close doesn't result in any persisted
                // updates.
-               nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
                check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
@@ -247,7 +247,7 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-               nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
@@ -286,7 +286,7 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-               nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 91575195a8826a3ae510e322d85420f06269325c..0d2b3b6a9f941c117c80465b7ff886f329a349ad 100644
@@ -226,7 +226,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        }
 
        // ...and make sure we can force-close a frozen channel
-       nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], true);
 
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index b88a38b56504eb652c68fb445c29b5e4e4addb5b..c5133a850584fd5b6e91d9c91e43a087c3c268d3 100644
@@ -802,7 +802,6 @@ pub(super) enum ChannelError {
        Ignore(String),
        Warn(String),
        Close(String),
-       CloseDelayBroadcast(String),
 }
 
 impl fmt::Debug for ChannelError {
@@ -811,7 +810,6 @@ impl fmt::Debug for ChannelError {
                        &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
                        &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
                        &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
-                       &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
                }
        }
 }
@@ -3799,6 +3797,11 @@ impl<Signer: Sign> Channel<Signer> {
 
        /// May panic if some calls other than message-handling calls (which will all Err immediately)
        /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+       ///
+       /// Some links printed in log lines are included here to check them during build (when run with
+       /// `cargo doc --document-private-items`):
+       /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+       /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
        pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
                node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
        -> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
@@ -3824,9 +3827,20 @@ impl<Signer: Sign> Channel<Signer> {
                                                return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
                                        }
                                        if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
-                                               return Err(ChannelError::CloseDelayBroadcast(
-                                                       "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting".to_owned()
-                                               ));
+                                               macro_rules! log_and_panic {
+                                                       ($err_msg: expr) => {
+                                                               log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+                                                               panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+                                                       }
+                                               }
+                                               log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+                                                       This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+                                                       More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+                                                       If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+                                                       ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+                                                       ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+                                                       Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+                                                       See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
                                        }
                                },
                                OptionalField::Absent => {}
@@ -3933,7 +3947,7 @@ impl<Signer: Sign> Channel<Signer> {
                                // now!
                                match self.free_holding_cell_htlcs(logger) {
                                        Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-                                       Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) =>
+                                       Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
                                                panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
                                        Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
                                                Ok(ReestablishResponses {
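
The panic message above doubles as recovery documentation: an operator who restarted from a stale backup should force-close without broadcasting the outdated local commitment transactions and then reconnect so the counterparty broadcasts theirs. The following is a minimal sketch of that flow using the methods introduced in this change; `channel_manager` is assumed to be the application's initialized ChannelManager handle, and peer reconnection is left to the application's networking layer.

// Recovery after restoring from an old backup, per the panic message above.
// NOTE: `channel_manager` is an assumed application handle, not part of this change.
channel_manager.force_close_all_channels_without_broadcasting_txn();
// Or, for a single affected channel (using the ids printed in the log line above):
// channel_manager.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id)?;
// Then reconnect to peers so they broadcast their newer commitment transactions.
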
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 54608e05e42bd8be9339273724a8f2115cf10b96..1e3d8de379ac30841922db96e652044a6ca21e32 100644
@@ -369,15 +369,6 @@ impl MsgHandleErrInternal {
                                                },
                                        },
                                },
-                               ChannelError::CloseDelayBroadcast(msg) => LightningError {
-                                       err: msg.clone(),
-                                       action: msgs::ErrorAction::SendErrorMessage {
-                                               msg: msgs::ErrorMessage {
-                                                       channel_id,
-                                                       data: msg
-                                               },
-                                       },
-                               },
                        },
                        chan_id: None,
                        shutdown_finish: None,
@@ -1273,13 +1264,6 @@ macro_rules! convert_chan_err {
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
-                       ChannelError::CloseDelayBroadcast(msg) => {
-                               log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, $short_to_id, $channel);
-                               let shutdown_res = $channel.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
-                                       shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
-                       }
                }
        }
 }
@@ -1945,7 +1929,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        /// `peer_msg` should be set when we receive a message from a peer, but not set when the
        /// user closes, which will be re-exposed as the `ChannelClosed` reason.
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+       -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
@@ -1964,7 +1949,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-               self.finish_force_close_channel(chan.force_shutdown(true));
+               self.finish_force_close_channel(chan.force_shutdown(broadcast));
                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1975,13 +1960,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(chan.get_counterparty_node_id())
        }
 
-       /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
-       /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
-       /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
-       /// channel.
-       pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+       fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
+               match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
                                self.channel_state.lock().unwrap().pending_msg_events.push(
                                        events::MessageSendEvent::HandleError {
@@ -1997,11 +1978,39 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+       /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+       /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+       /// channel.
+       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       -> Result<(), APIError> {
+               self.force_close_sending_error(channel_id, counterparty_node_id, true)
+       }
+
+       /// Force closes a channel, rejecting new HTLCs on the given channel without broadcasting
+       /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
+       /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+       ///
+       /// You can always get the latest local transaction(s) to broadcast from
+       /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+       pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       -> Result<(), APIError> {
+               self.force_close_sending_error(channel_id, counterparty_node_id, false)
+       }
+
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
        /// for each to the chain and rejecting new HTLCs on each.
-       pub fn force_close_all_channels(&self) {
+       pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+               for chan in self.list_channels() {
+                       let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+               }
+       }
+
+       /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
+       /// local transaction(s).
+       pub fn force_close_all_channels_without_broadcasting_txn(&self) {
                for chan in self.list_channels() {
-                       let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
+                       let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
                }
        }
 
@@ -3188,7 +3197,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        // ChannelClosed event is generated by handle_error for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
-                                                                               ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
                                                                        handle_errors.push((counterparty_node_id, err));
                                                                        continue;
@@ -6058,7 +6066,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        for chan in self.list_channels() {
                                if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
+                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
                                }
                        }
                } else {
@@ -6080,7 +6088,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        }
 
                        // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-                       let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
+                       let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
                }
        }
 }
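
The old `force_close_channel` / `force_close_all_channels` entry points are replaced above by explicit broadcasting and non-broadcasting variants. A short sketch contrasting the two follows; `channel_manager`, `channel_id`, `counterparty_node_id`, `monitor` (the channel's ChannelMonitor), and `logger` are assumed to be in scope and are placeholders, not part of this change.

// Normal unilateral close: broadcast our latest commitment transaction on-chain.
channel_manager.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id).unwrap();
// ...or, after restoring from an old backup, close without broadcasting the stale commitment:
// channel_manager.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id).unwrap();
// Per the new doc comment, the latest holder commitment transaction(s) remain available
// from the channel's ChannelMonitor if broadcasting later becomes safe, e.g.:
// let txn = monitor.get_latest_holder_commitment_txn(&logger);
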
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 02ef5f37b9b2866c182e1773968002a8851b94b1..a598e7de70d0212122b0c4c42a3754eb6f1b74eb 100644
@@ -2203,7 +2203,7 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        {
@@ -2224,7 +2224,7 @@ fn channel_monitor_network_test() {
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
@@ -2264,7 +2264,7 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[2], 1);
        check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
@@ -3403,7 +3403,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -3466,7 +3466,7 @@ fn test_force_close_fail_back() {
        // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 
-       nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
@@ -4793,7 +4793,7 @@ fn test_claim_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
-       nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
@@ -4822,7 +4822,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
-       nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -7399,14 +7399,11 @@ fn test_user_configurable_csv_delay() {
        } else { assert!(false); }
 }
 
-#[test]
-fn test_data_loss_protect() {
-       // We want to be sure that :
-       // * we don't broadcast our Local Commitment Tx in case of fallen behind
-       //   (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
-       // * we close channel in case of detecting other being fallen behind
-       // * we are able to claim our own outputs thanks to to_remote being static
-       // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
+fn do_test_data_loss_protect(reconnect_panicing: bool) {
+       // When we get a data_loss_protect proving we're behind, we immediately panic as the
+       // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
+       // panic message informs the user they should force-close without broadcasting, which is tested
+       // if `reconnect_panicing` is not set.
        let persister;
        let logger;
        let fee_estimator;
@@ -7464,53 +7461,53 @@ fn test_data_loss_protect() {
 
        check_added_monitors!(nodes[0], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       if reconnect_panicing {
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
-       let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+               let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-       // Check we don't broadcast any transactions following learning of per_commitment_point from B
-       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-       check_added_monitors!(nodes[0], 1);
+               // Check we close channel detecting A is fallen-behind
+               // Check that we sent the warning message when we detected that A has fallen behind,
+               // and give the possibility for A to recover from the warning.
+               nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+               let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+               assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+
+               {
+                       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+                       // The node B should not broadcast the transaction to force close the channel!
+                       assert!(node_txn.is_empty());
+               }
+
+               let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+               // Check A panics upon seeing proof it has fallen behind.
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
+               return; // By this point we should have panic'ed!
+       }
 
+       nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
        {
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 0);
        }
 
-       let mut reestablish_1 = Vec::with_capacity(1);
        for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-               if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
-                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
-                       reestablish_1.push(msg.clone());
-               } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+               if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
                } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
                        match action {
                                &ErrorAction::SendErrorMessage { ref msg } => {
-                                       assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
+                                       assert_eq!(msg.data, "Channel force-closed");
                                },
                                _ => panic!("Unexpected event!"),
                        }
                } else {
-                       panic!("Unexpected event")
+                       panic!("Unexpected event {:?}", msg)
                }
        }
 
-       // Check we close channel detecting A is fallen-behind
-       // Check that we sent the warning message when we detected that A has fallen behind,
-       // and give the possibility for A to recover from the warning.
-       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-       let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
-       assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
-
-       // Check A is able to claim to_remote output
-       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-       // The node B should not broadcast the transaction to force close the channel!
-       assert!(node_txn.is_empty());
-       // B should now detect that there is something wrong and should force close the channel.
-       let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
-
                // after the warning message sent by B, we should not be able to
                // use the channel, or successfully reconnect to the channel.
        assert!(nodes[0].node.list_usable_channels().is_empty());
@@ -7541,6 +7538,17 @@ fn test_data_loss_protect() {
        check_closed_broadcast!(nodes[1], false);
 }
 
+#[test]
+#[should_panic]
+fn test_data_loss_protect_showing_stale_state_panics() {
+       do_test_data_loss_protect(true);
+}
+
+#[test]
+fn test_force_close_without_broadcast() {
+       do_test_data_loss_protect(false);
+}
+
 #[test]
 fn test_check_htlc_underpaying() {
        // Send payment through A -> B but A is maliciously
@@ -8365,7 +8373,7 @@ fn test_manually_accept_inbound_channel_request() {
                _ => panic!("Unexpected event"),
        }
 
-       nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
 
        let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_msg_ev.len(), 1);
@@ -8400,7 +8408,7 @@ fn test_manually_reject_inbound_channel_request() {
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
                }
                _ => panic!("Unexpected event"),
        }
@@ -9053,7 +9061,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                force_closing_node = 1;
                counterparty_node = 0;
        }
-       nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
        check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
@@ -9489,7 +9497,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-       nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 37ca25babe9eb48e48a443a143633d5996c467a1..7f725a42141013b778ece0e0b2e5a478c71a5d3c 100644
@@ -584,7 +584,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
        let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index 237a7fa316022f53f0a6e291cc8bef70ca5a1734..508ba414037107b260938b6c82bdbf2ad107dd4a 100644
@@ -817,7 +817,7 @@ fn test_0conf_close_no_early_chan_update() {
        // We can use the channel immediately, but won't generate a channel_update until we get confs
        send_payment(&nodes[0], &[&nodes[1]], 100_000);
 
-       nodes[0].node.force_close_all_channels();
+       nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
        let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index 14ab5ee3c4b83ebd3dff5e00d05daf5e597ea1e4..caba7753f3fbb9bbf9b5364f6ab2488f8e6a6503 100644
@@ -459,33 +459,33 @@ pub enum Event {
        /// Indicates a request to open a new channel by a peer.
        ///
        /// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
-       /// request, call [`ChannelManager::force_close_channel`].
+       /// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
        ///
        /// The event is only triggered when a new open channel request is received and the
        /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
        ///
        /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-       /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+       /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
        /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
        OpenChannelRequest {
                /// The temporary channel ID of the channel requested to be opened.
                ///
                /// When responding to the request, the `temporary_channel_id` should be passed
                /// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
-               /// or through [`ChannelManager::force_close_channel`] to reject.
+               /// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
                ///
                /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-               /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+               /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
                temporary_channel_id: [u8; 32],
                /// The node_id of the counterparty requesting to open the channel.
                ///
                /// When responding to the request, the `counterparty_node_id` should be passed
                /// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
-               /// accept the request, or through [`ChannelManager::force_close_channel`] to reject the
+               /// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
                /// request.
                ///
                /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-               /// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
+               /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
                counterparty_node_id: PublicKey,
                /// The channel value of the requested channel.
                funding_satoshis: u64,
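
The `OpenChannelRequest` documentation now points rejections at the non-broadcasting variant, since no funding transaction exists yet for a merely-requested channel. Below is a sketch of an event-handler branch under `manually_accept_inbound_channels = true`; `channel_manager` and the `reject` decision are placeholders, and the exact `accept_inbound_channel` parameters are whatever its own documentation specifies.

// Sketch: responding to Event::OpenChannelRequest when manual acceptance is enabled.
match event {
    Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
        if reject {
            // Nothing has been funded or broadcast yet, so reject with the
            // non-broadcasting force-close, as the updated docs above describe.
            let _ = channel_manager.force_close_without_broadcasting_txn(
                &temporary_channel_id, &counterparty_node_id);
        } else {
            // Accept via ChannelManager::accept_inbound_channel (see its docs for arguments).
        }
    },
    _ => {},
}
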