Merge pull request #1106 from TheBlueMatt/2021-10-no-perm-err-broadcast
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Thu, 29 Sep 2022 22:02:07 +0000 (22:02 +0000)
committer GitHub <noreply@github.com>
Thu, 29 Sep 2022 22:02:07 +0000 (22:02 +0000)
Do not broadcast commitment txn on Permanent mon update failure

19 files changed:
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/utils/test_persister.rs
lightning-invoice/src/payment.rs
lightning-persister/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/util/errors.rs
lightning/src/util/persist.rs
lightning/src/util/test_utils.rs
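
The change is mechanical across all nineteen files: the `Result<(), ChannelMonitorUpdateErr>` return used throughout the persistence and `chain::Watch` APIs becomes a three-state `ChannelMonitorUpdateStatus` enum, and related error/event names are renamed to match. An illustrative summary of the mapping (a reading aid, not itself part of the diff):

```rust
// Old API (pre-#1106)                     New API
// -------------------------------------  ---------------------------------------------
// Result<(), ChannelMonitorUpdateErr>    ChannelMonitorUpdateStatus
// Ok(())                                 ChannelMonitorUpdateStatus::Completed
// Err(TemporaryFailure)                  ChannelMonitorUpdateStatus::InProgress
// Err(PermanentFailure)                  ChannelMonitorUpdateStatus::PermanentFailure
// APIError::MonitorUpdateFailed          APIError::MonitorUpdateInProgress
// MonitorEvent::UpdateCompleted          MonitorEvent::Completed
```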

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 062b5fb1b01179f7606276fc1e6d076311dc766d..a02d003e1b7a88ab2d70bb490d5280a079513f6d 100644 (file)
@@ -32,7 +32,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
@@ -124,7 +124,7 @@ impl TestChainMonitor {
        }
 }
 impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
                let mut ser = VecWriter(Vec::new());
                monitor.write(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
@@ -134,7 +134,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                self.chain_monitor.watch_channel(funding_txo, monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
                let mut map_lock = self.latest_monitors.lock().unwrap();
                let mut map_entry = match map_lock.entry(funding_txo) {
                        hash_map::Entry::Occupied(entry) => entry,
@@ -270,7 +270,7 @@ fn check_api_err(api_err: APIError) {
                                _ => panic!("{}", err),
                        }
                },
-               APIError::MonitorUpdateFailed => {
+               APIError::MonitorUpdateInProgress => {
                        // We can (obviously) temp-fail a monitor update
                },
                APIError::IncompatibleShutdownScript { .. } => panic!("Cannot send an incompatible shutdown script"),
@@ -363,7 +363,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
                        let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
-                               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
+                               Arc::new(TestPersister {
+                                       update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+                               }), Arc::clone(&keys_manager)));
 
                        let mut config = UserConfig::default();
                        config.channel_config.forwarding_fee_proportional_millionths = 0;
@@ -383,7 +385,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                    let keys_manager = Arc::clone(& $keys_manager);
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
                        let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
-                               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
+                               Arc::new(TestPersister {
+                                       update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+                               }), Arc::clone(& $keys_manager)));
 
                        let mut config = UserConfig::default();
                        config.channel_config.forwarding_fee_proportional_millionths = 0;
@@ -412,7 +416,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                        let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
                        for (funding_txo, mon) in monitors.drain() {
-                               assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+                               assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
+                                       ChannelMonitorUpdateStatus::Completed);
                        }
                        res
                } }
@@ -889,12 +894,12 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        // bit-twiddling mutations to have similar effects. This is probably overkill, but no
                        // harm in doing so.
 
-                       0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
-                       0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
-                       0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
+                       0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+                       0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+                       0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
 
                        0x08 => {
                                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
@@ -1119,9 +1124,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                // after we resolve all pending events.
                                // First make sure there are no pending monitor updates, resetting the error state
                                // and calling force_channel_monitor_updated for each monitor.
-                               *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
-                               *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
-                               *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
+                               *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+                               *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+                               *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
 
                                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
                                        monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
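
The fuzz harness above drives persistence results through a shared `Mutex<ChannelMonitorUpdateStatus>`. Distilled from the hunks above, the toggle-and-resume pattern looks like this (`monitor_a` and `chan_1_funding` are the harness's own bindings; this is a sketch of the pattern, not standalone code):

```rust
// Make every persist call report an in-flight (asynchronous) persistence:
*monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress;
// ... process messages; monitor updates accumulate as "in progress" ...

// Resume: future persists succeed, and the stuck channel is unblocked by
// telling the ChainMonitor that all updates up to `id` have completed.
*monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
    monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
}
```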
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 9f1580288cac0dda4d67f7dc8a02522e4bd0627a..7edba55878f10d274e4232833516ac418690dcc5 100644 (file)
@@ -29,7 +29,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::{BestBlock, Confirm, Listen};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::chainmonitor;
 use lightning::chain::transaction::OutPoint;
@@ -389,7 +389,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 
        let broadcast = Arc::new(TestBroadcaster{ txn_broadcasted: Mutex::new(Vec::new()) });
        let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone(),
-               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) })));
+               Arc::new(TestPersister { update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) })));
 
        let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), inbound_payment_key: KeyMaterial(inbound_payment_key.try_into().unwrap()), counter: AtomicU64::new(0) });
        let mut config = UserConfig::default();
diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs
index 7ca1ff96d05ae5f0311a9702836800c01f012b65..44675fa787294b4a63144c9a2cddfacc4b9b33a6 100644 (file)
@@ -7,14 +7,14 @@ use lightning::util::enforcing_trait_impls::EnforcingSigner;
 use std::sync::Mutex;
 
 pub struct TestPersister {
-       pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+       pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
 impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 }
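
Because both `Persist` methods simply clone whatever status is stored in `update_ret`, a test can flip persistence behavior at any point. A minimal usage sketch (inside a hypothetical test function):

```rust
use std::sync::Mutex;

let persister = TestPersister {
    update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
};
// Every persist call from here on reports an in-flight persistence:
*persister.update_ret.lock().unwrap() = chain::ChannelMonitorUpdateStatus::InProgress;
```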
diff --git a/lightning-invoice/src/payment.rs b/lightning-invoice/src/payment.rs
index ee85fab0b5dbe8081d3b21e367c36125c28ecf92..cadb595e719276e5fb8aa88f74c347b6d185ee57 100644 (file)
@@ -474,11 +474,11 @@ where
                                },
                                PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, results } => {
                                        // If a `PartialFailure` event returns a result that is an `Ok()`, it means that
-                                       // part of our payment is retried. When we receive `MonitorUpdateFailed`, it
+                                       // part of our payment is retried. When we receive `MonitorUpdateInProgress`, it
                                        // means that we are still waiting for our channel monitor update to be completed.
                                        for (result, path) in results.iter().zip(route.paths.into_iter()) {
                                                match result {
-                                                       Ok(_) | Err(APIError::MonitorUpdateFailed) => {
+                                                       Ok(_) | Err(APIError::MonitorUpdateInProgress) => {
                                                                self.process_path_inflight_htlcs(payment_hash, path);
                                                        },
                                                        _ => {},
@@ -578,11 +578,11 @@ where
                        },
                        Err(PaymentSendFailure::PartialFailure { failed_paths_retry, results, .. }) => {
                                // If a `PartialFailure` error contains a result that is an `Ok()`, it means that
-                               // part of our payment is retried. When we receive `MonitorUpdateFailed`, it
+                               // part of our payment is retried. When we receive `MonitorUpdateInProgress`, it
                                // means that we are still waiting for our channel monitor update to complete.
                                for (result, path) in results.iter().zip(route.unwrap().paths.into_iter()) {
                                        match result {
-                                               Ok(_) | Err(APIError::MonitorUpdateFailed) => {
+                                               Ok(_) | Err(APIError::MonitorUpdateInProgress) => {
                                                        self.process_path_inflight_htlcs(payment_hash, path);
                                                },
                                                _ => {},
@@ -782,7 +782,7 @@ mod tests {
        use std::time::{SystemTime, Duration};
        use time_utils::tests::SinceEpoch;
        use DEFAULT_EXPIRY_TIME;
-       use lightning::util::errors::APIError::{ChannelUnavailable, MonitorUpdateFailed};
+       use lightning::util::errors::APIError::{ChannelUnavailable, MonitorUpdateInProgress};
 
        fn invoice(payment_preimage: PaymentPreimage) -> Invoice {
                let payment_hash = Sha256::hash(&payment_preimage.0);
@@ -1683,7 +1683,7 @@ mod tests {
                        .fails_with_partial_failure(
                                retry.clone(), OnAttempt(1),
                                Some(vec![
-                                       Err(ChannelUnavailable { err: "abc".to_string() }), Err(MonitorUpdateFailed)
+                                       Err(ChannelUnavailable { err: "abc".to_string() }), Err(MonitorUpdateInProgress)
                                ]))
                        .expect_send(Amount::ForInvoice(final_value_msat));
 
@@ -1695,7 +1695,7 @@ mod tests {
                invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
                let inflight_map = invoice_payer.create_inflight_map();
 
-               // Only the second path, which failed with `MonitorUpdateFailed` should be added to our
+               // Only the second path, which failed with `MonitorUpdateInProgress`, should be added to our
                // inflight map because retries are disabled.
                assert_eq!(inflight_map.0.len(), 2);
        }
@@ -1714,7 +1714,7 @@ mod tests {
                        .fails_with_partial_failure(
                                retry.clone(), OnAttempt(1),
                                Some(vec![
-                                       Ok(()), Err(MonitorUpdateFailed)
+                                       Ok(()), Err(MonitorUpdateInProgress)
                                ]))
                        .expect_send(Amount::ForInvoice(final_value_msat));
 
@@ -2039,7 +2039,7 @@ mod tests {
                }
 
                fn fails_on_attempt(self, attempt: usize) -> Self {
-                       let failure = PaymentSendFailure::ParameterError(APIError::MonitorUpdateFailed);
+                       let failure = PaymentSendFailure::ParameterError(APIError::MonitorUpdateInProgress);
                        self.fails_with(failure, OnAttempt(attempt))
                }
 
diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 03e42e43e42c2b425f2ba0d830c542952883e286..c36609351c7d4280cd4ac5a65265ad923ddc76de 100644 (file)
@@ -138,7 +138,7 @@ mod tests {
        use bitcoin::blockdata::block::{Block, BlockHeader};
        use bitcoin::hashes::hex::FromHex;
        use bitcoin::{Txid, TxMerkleNode};
-       use lightning::chain::ChannelMonitorUpdateErr;
+       use lightning::chain::ChannelMonitorUpdateStatus;
        use lightning::chain::chainmonitor::Persist;
        use lightning::chain::transaction::OutPoint;
        use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
@@ -270,7 +270,7 @@ mod tests {
                        index: 0
                };
                match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
                        _ => panic!("unexpected result from persisting new channel")
                }
 
@@ -307,7 +307,7 @@ mod tests {
                        index: 0
                };
                match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
                        _ => panic!("unexpected result from persisting new channel")
                }
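
Since `ChannelMonitorUpdateStatus` derives `PartialEq` (see the enum definition later in this diff), the match in these tests could equivalently be written as a plain assertion, assuming the same test setup as above:

```rust
let status = persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2);
assert_eq!(status, ChannelMonitorUpdateStatus::PermanentFailure);
```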
 
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 9f15a2a27996ba1e40faf27a97f80c27fb434659..2479e8f7e7b5efd00582d2e9aaac7ecd9cfa9f31 100644 (file)
@@ -27,7 +27,7 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::hash_types::Txid;
 
 use chain;
-use chain::{ChannelMonitorUpdateErr, Filter, WatchedOutput};
+use chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::{OutPoint, TransactionData};
@@ -78,20 +78,21 @@ impl MonitorUpdateId {
 ///
 /// Each method can return three possible values:
 ///  * If persistence (including any relevant `fsync()` calls) happens immediately, the
-///    implementation should return `Ok(())`, indicating normal channel operation should continue.
+///    implementation should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal
+///    channel operation should continue.
 ///  * If persistence happens asynchronously, implementations should first ensure the
 ///    [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return
-///    `Err(ChannelMonitorUpdateErr::TemporaryFailure)` while the update continues in the
-///    background. Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be
-///    called with the corresponding [`MonitorUpdateId`].
+///    [`ChannelMonitorUpdateStatus::InProgress`] while the update continues in the background.
+///    Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be called with
+///    the corresponding [`MonitorUpdateId`].
 ///
 ///    Note that unlike the direct [`chain::Watch`] interface,
 ///    [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
 ///
 ///  * If persistence fails for some reason, implementations should return
-///    `Err(ChannelMonitorUpdateErr::PermanentFailure)`, in which case the channel will likely be
+///    [`ChannelMonitorUpdateStatus::PermanentFailure`], in which case the channel will likely be
 ///    closed without broadcasting the latest state. See
-///    [`ChannelMonitorUpdateErr::PermanentFailure`] for more details.
+///    [`ChannelMonitorUpdateStatus::PermanentFailure`] for more details.
 pub trait Persist<ChannelSigner: Sign> {
        /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
        /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
@@ -101,14 +102,14 @@ pub trait Persist<ChannelSigner: Sign> {
        /// and the stored channel data). Note that you **must** persist every new monitor to disk.
        ///
        /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
-       /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
+       /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
        ///
        /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
-       /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+       /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+       fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 
        /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
        /// update.
@@ -136,14 +137,14 @@ pub trait Persist<ChannelSigner: Sign> {
        /// whereas updates are small and `O(1)`.
        ///
        /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
-       /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
+       /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
        ///
        /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
        /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
-       /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+       /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+       fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 }
 
 struct MonitorHolder<ChannelSigner: Sign> {
@@ -151,9 +152,9 @@ struct MonitorHolder<ChannelSigner: Sign> {
        /// The full set of pending monitor updates for this Channel.
        ///
        /// Note that this lock must be held during updates to prevent a race where we call
-       /// update_persisted_channel, the user returns a TemporaryFailure, and then calls
-       /// channel_monitor_updated immediately, racing our insertion of the pending update into the
-       /// contained Vec.
+       /// update_persisted_channel, the user returns a
+       /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
+       /// immediately, racing our insertion of the pending update into the contained Vec.
        ///
        /// Beyond the synchronization of updates themselves, we cannot handle user events until after
        /// any chain updates have been stored on disk. Thus, we scan this list when returning updates
@@ -287,20 +288,20 @@ where C::Target: chain::Filter,
                                        if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
                                                // If there are no ChainSync persists awaiting completion, go ahead and
                                                // set last_chain_persist_height here - we wouldn't want the first
-                                               // TemporaryFailure to always immediately be considered "overly delayed".
+                                               // InProgress to always immediately be considered "overly delayed".
                                                monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
                                        }
                                }
 
                                log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
                                match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
-                                       Ok(()) =>
+                                       ChannelMonitorUpdateStatus::Completed =>
                                                log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
-                                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {
+                                       ChannelMonitorUpdateStatus::PermanentFailure => {
                                                monitor_state.channel_perm_failed.store(true, Ordering::Release);
                                                self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
                                        },
-                                       Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+                                       ChannelMonitorUpdateStatus::InProgress => {
                                                log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                                                pending_monitor_updates.push(update_id);
                                        },
@@ -400,12 +401,12 @@ where C::Target: chain::Filter,
        }
 
        /// Indicates the persistence of a [`ChannelMonitor`] has completed after
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation.
+       /// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
        ///
        /// Thus, the anticipated use is, at a high level:
        ///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
        ///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
-       ///     returning [`ChannelMonitorUpdateErr::TemporaryFailure`],
+       ///     returning [`ChannelMonitorUpdateStatus::InProgress`],
        ///  2) once all remote copies are updated, you call this function with the
        ///     `completed_update_id` that completed, and once all pending updates have completed the
        ///     channel will be re-enabled.
@@ -438,10 +439,10 @@ where C::Target: chain::Filter,
                                if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) {
                                        // If there are still monitor updates pending (or an old monitor update
                                        // finished after a later one perm-failed), we cannot yet construct an
-                                       // UpdateCompleted event.
+                                       // Completed event.
                                        return Ok(());
                                }
-                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
+                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
                                        funding_txo,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
                                }], monitor_data.monitor.get_counterparty_node_id()));
@@ -464,7 +465,7 @@ where C::Target: chain::Filter,
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
                let monitors = self.monitors.read().unwrap();
                let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
-               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
+               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
                        funding_txo,
                        monitor_update_id,
                }], counterparty_node_id));
@@ -570,27 +571,31 @@ where C::Target: chain::Filter,
        ///
        /// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
        /// monitors lock.
-       fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
                let mut monitors = self.monitors.write().unwrap();
                let entry = match monitors.entry(funding_outpoint) {
                        hash_map::Entry::Occupied(_) => {
                                log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
-                               return Err(ChannelMonitorUpdateErr::PermanentFailure)},
+                               return ChannelMonitorUpdateStatus::PermanentFailure
+                       },
                        hash_map::Entry::Vacant(e) => e,
                };
                log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
                let update_id = MonitorUpdateId::from_new_monitor(&monitor);
                let mut pending_monitor_updates = Vec::new();
                let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
-               if persist_res.is_err() {
-                       log_error!(self.logger, "Failed to persist new ChannelMonitor for channel {}: {:?}", log_funding_info!(monitor), persist_res);
-               } else {
-                       log_trace!(self.logger, "Finished persisting new ChannelMonitor for channel {}", log_funding_info!(monitor));
-               }
-               if persist_res == Err(ChannelMonitorUpdateErr::PermanentFailure) {
-                       return persist_res;
-               } else if persist_res.is_err() {
-                       pending_monitor_updates.push(update_id);
+               match persist_res {
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+                               pending_monitor_updates.push(update_id);
+                       },
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               log_error!(self.logger, "Persistence of new ChannelMonitor for channel {} failed", log_funding_info!(monitor));
+                               return persist_res;
+                       },
+                       ChannelMonitorUpdateStatus::Completed => {
+                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+                       }
                }
                if let Some(ref chain_source) = self.chain_source {
                        monitor.load_outputs_to_watch(chain_source);
@@ -606,7 +611,7 @@ where C::Target: chain::Filter,
 
        /// Note that we persist the given `ChannelMonitor` update while holding the
        /// `ChainMonitor` monitors lock.
-       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
                // Update the monitor that watches the channel referred to by the given outpoint.
                let monitors = self.monitors.read().unwrap();
                match monitors.get(&funding_txo) {
@@ -619,7 +624,7 @@ where C::Target: chain::Filter,
                                #[cfg(any(test, fuzzing))]
                                panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
                                #[cfg(not(any(test, fuzzing)))]
-                               Err(ChannelMonitorUpdateErr::PermanentFailure)
+                               ChannelMonitorUpdateStatus::PermanentFailure
                        },
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
@@ -633,20 +638,23 @@ where C::Target: chain::Filter,
                                let update_id = MonitorUpdateId::from_monitor_update(&update);
                                let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
                                let persist_res = self.persister.update_persisted_channel(funding_txo, &Some(update), monitor, update_id);
-                               if let Err(e) = persist_res {
-                                       if e == ChannelMonitorUpdateErr::TemporaryFailure {
+                               match persist_res {
+                                       ChannelMonitorUpdateStatus::InProgress => {
                                                pending_monitor_updates.push(update_id);
-                                       } else {
+                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+                                       },
+                                       ChannelMonitorUpdateStatus::PermanentFailure => {
                                                monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                       }
-                                       log_error!(self.logger, "Failed to persist ChannelMonitor update for channel {}: {:?}", log_funding_info!(monitor), e);
-                               } else {
-                                       log_trace!(self.logger, "Finished persisting ChannelMonitor update for channel {}", log_funding_info!(monitor));
+                                               log_error!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} failed", log_funding_info!(monitor));
+                                       },
+                                       ChannelMonitorUpdateStatus::Completed => {
+                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+                                       },
                                }
                                if update_res.is_err() {
-                                       Err(ChannelMonitorUpdateErr::PermanentFailure)
+                                       ChannelMonitorUpdateStatus::PermanentFailure
                                } else if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
-                                       Err(ChannelMonitorUpdateErr::PermanentFailure)
+                                       ChannelMonitorUpdateStatus::PermanentFailure
                                } else {
                                        persist_res
                                }
@@ -723,7 +731,7 @@ mod tests {
        use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
        use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
        use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
-       use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
+       use chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
        use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
        use ln::channelmanager::{self, PaymentSendFailure};
        use ln::functional_test_utils::*;
@@ -747,7 +755,7 @@ mod tests {
                let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
                chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
-               chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+               chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
                nodes[1].node.claim_funds(payment_preimage_1);
                check_added_monitors!(nodes[1], 1);
@@ -756,7 +764,7 @@ mod tests {
                check_added_monitors!(nodes[1], 1);
                expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
-               chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+               chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
                let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
                assert_eq!(persistences.len(), 1);
@@ -834,7 +842,7 @@ mod tests {
 
                // Temp-fail the block connection which will hold the channel-closed event
                chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
                // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
                // channel is now closed, but the ChannelManager doesn't know that yet.
@@ -850,7 +858,7 @@ mod tests {
 
                // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
                // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
-               chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
                unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)),
                        true, APIError::ChannelUnavailable { ref err },
                        assert!(err.contains("ChannelMonitor storage failure")));
@@ -896,7 +904,7 @@ mod tests {
                create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
                chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
                connect_blocks(&nodes[0], 1);
                // Before processing events, the ChannelManager will still think the Channel is open and
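
Taken together, the `Persist` documentation and the new `watch_channel`/`update_channel` match arms above describe the intended asynchronous flow: durably write the monitor locally, return `InProgress`, replicate in the background, then call `ChainMonitor::channel_monitor_updated` with the same `MonitorUpdateId`. A sketch of a persister following that contract; every name here is ours, `write_and_fsync` is a stub, and this is an illustration under those assumptions, not LDK API:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;
use lightning::util::ser::Writeable;
use std::sync::mpsc::Sender;

/// Queues remote (e.g. watchtower/backup) replication on a background worker.
/// The worker must call `ChainMonitor::channel_monitor_updated(funding_txo,
/// update_id)` once replication finishes, re-enabling the channel.
struct RemoteBackedPersister {
    remote_tx: Sender<(OutPoint, Vec<u8>, MonitorUpdateId)>,
}

/// Stub standing in for a durable local write (create, write, `fsync()`).
fn write_and_fsync(_funding_txo: OutPoint, _bytes: &[u8]) -> std::io::Result<()> {
    unimplemented!("illustration only")
}

impl<ChannelSigner: Sign> Persist<ChannelSigner> for RemoteBackedPersister {
    fn persist_new_channel(&self, funding_txo: OutPoint,
        data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
    ) -> ChannelMonitorUpdateStatus {
        let bytes = data.encode();
        // Per the Persist docs above, the monitor must be durably on local
        // disk *before* we return InProgress.
        if write_and_fsync(funding_txo, &bytes).is_err() {
            return ChannelMonitorUpdateStatus::PermanentFailure;
        }
        match self.remote_tx.send((funding_txo, bytes, update_id)) {
            // The channel stays frozen until the background worker calls
            // channel_monitor_updated for this update_id.
            Ok(()) => ChannelMonitorUpdateStatus::InProgress,
            Err(_) => ChannelMonitorUpdateStatus::PermanentFailure,
        }
    }

    fn update_persisted_channel(&self, funding_txo: OutPoint,
        _update: &Option<ChannelMonitorUpdate>,
        data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
    ) -> ChannelMonitorUpdateStatus {
        // This sketch rewrites the whole monitor rather than journaling the
        // individual ChannelMonitorUpdate.
        self.persist_new_channel(funding_txo, data, update_id)
    }
}
```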
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 748adbd035983b574bccd4836e2f6aecd48a9323..8f8cbdf448adb80d7fa50ee279c86862c1d0abd6 100644 (file)
@@ -76,12 +76,14 @@ pub struct ChannelMonitorUpdate {
        /// increasing and increase by one for each new update, with one exception specified below.
        ///
        /// This sequence number is also used to track up to which points updates which returned
-       /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
+       /// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
        /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
        ///
        /// The only instance where update_id values are not strictly increasing is the case where we
        /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
        /// its docs for more details.
+       ///
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
        pub update_id: u64,
 }
 
@@ -132,10 +134,10 @@ pub enum MonitorEvent {
        CommitmentTxConfirmed(OutPoint),
 
        /// Indicates a [`ChannelMonitor`] update has completed. See
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`] for more information on how this is used.
+       /// [`ChannelMonitorUpdateStatus::InProgress`] for more information on how this is used.
        ///
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
-       UpdateCompleted {
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
+       Completed {
                /// The funding outpoint of the [`ChannelMonitor`] that was updated
                funding_txo: OutPoint,
                /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
@@ -147,15 +149,15 @@ pub enum MonitorEvent {
        },
 
        /// Indicates a [`ChannelMonitor`] update has failed. See
-       /// [`ChannelMonitorUpdateErr::PermanentFailure`] for more information on how this is used.
+       /// [`ChannelMonitorUpdateStatus::PermanentFailure`] for more information on how this is used.
        ///
-       /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+       /// [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
        UpdateFailed(OutPoint),
 }
 impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
-       // Note that UpdateCompleted and UpdateFailed are currently never serialized to disk as they are
+       // Note that Completed and UpdateFailed are currently never serialized to disk as they are
        // generated only in ChainMonitor
-       (0, UpdateCompleted) => {
+       (0, Completed) => {
                (0, funding_txo, required),
                (2, monitor_update_id, required),
        },
@@ -1314,14 +1316,20 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
        }
 
        /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of
-       /// the Channel was out-of-date. You may use it to get a broadcastable holder toxic tx in case of
-       /// fallen-behind, i.e when receiving a channel_reestablish with a proof that our counterparty side knows
-       /// a higher revocation secret than the holder commitment number we are aware of. Broadcasting these
-       /// transactions are UNSAFE, as they allow counterparty side to punish you. Nevertheless you may want to
-       /// broadcast them if counterparty don't close channel with his higher commitment transaction after a
-       /// substantial amount of time (a month or even a year) to get back funds. Best may be to contact
-       /// out-of-band the other node operator to coordinate with him if option is available to you.
-       /// In any-case, choice is up to the user.
+       /// the Channel was out-of-date.
+       ///
+       /// You may also use this to broadcast the latest local commitment transaction, either because
+       /// a monitor update failed with [`ChannelMonitorUpdateStatus::PermanentFailure`] or because we've
+       /// fallen behind (i.e. we've received proof that our counterparty knows a revocation
+       /// secret we gave them that they shouldn't know).
+       ///
+       /// Broadcasting these transactions in the second case is UNSAFE, as they allow the
+       /// counterparty to punish you. Nevertheless, you may want to broadcast them if the
+       /// counterparty doesn't close the channel with their commitment transaction after a
+       /// substantial amount of time. It may be best to contact the other node operator
+       /// out-of-band to coordinate other options available to you. In any case, the choice is up to you.
+       ///
+       /// [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
        pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
        where L::Target: Logger {
                self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
@@ -2248,7 +2256,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        if *should_broadcast {
                                                self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
                                        } else if !self.holder_tx_signed {
-                                               log_error!(logger, "You have a toxic holder commitment transaction avaible in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take");
+                                               log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
+                                               log_error!(logger, "    in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id()));
+                                               log_error!(logger, "    Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
                                        } else {
                                                // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager
                                                // will still give us a ChannelForceClosed event with !should_broadcast, but we
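
Per the reworked `get_latest_holder_commitment_txn` docs above, broadcasting the latest holder state is a deliberate, manual last resort. A hedged sketch of the `PermanentFailure` case, assuming bindings `monitor`, `logger`, and `broadcaster` exist and that the `ChannelManager` can no longer generate updates for this channel:

```rust
// Only safe in the monitor-update-failure case -- NOT when we have fallen
// behind, where these transactions are revoked and broadcasting is UNSAFE.
for tx in monitor.get_latest_holder_commitment_txn(&logger) {
    broadcaster.broadcast_transaction(&tx);
}
```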
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index f0544679817db3434a679662901240b99d68da54..c54f9b1d7eba6814cb831bfd3c91c112fcc096cf 100644 (file)
@@ -187,68 +187,81 @@ pub trait Confirm {
        fn get_relevant_txids(&self) -> Vec<Txid>;
 }
 
-/// An error enum representing a failure to persist a channel monitor update.
+/// An enum representing the status of a channel monitor update persistence.
 #[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ChannelMonitorUpdateErr {
+pub enum ChannelMonitorUpdateStatus {
+       /// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
+       /// have been updated.
+       ///
+       /// This includes performing any `fsync()` calls required to ensure the update is guaranteed to
+       /// be available on restart even if the application crashes.
+       Completed,
        /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
        /// our state failed, but is expected to succeed at some point in the future).
        ///
        /// Such a failure will "freeze" a channel, preventing us from revoking old states or
-       /// submitting new commitment transactions to the counterparty. Once the update(s) that failed
-       /// have been successfully applied, a [`MonitorEvent::UpdateCompleted`] event should be returned
-       /// via [`Watch::release_pending_monitor_events`] which will then restore the channel to an
-       /// operational state.
-       ///
-       /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
-       /// you return a TemporaryFailure you must ensure that it is written to disk safely before
-       /// writing out the latest ChannelManager state.
-       ///
-       /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
-       /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
-       /// to claim it on this channel) and those updates must be applied wherever they can be. At
-       /// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should
-       /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
-       /// the channel which would invalidate previous ChannelMonitors are not made when a channel has
-       /// been "frozen".
-       ///
-       /// Note that even if updates made after TemporaryFailure succeed you must still provide a
-       /// [`MonitorEvent::UpdateCompleted`] to ensure you have the latest monitor and re-enable
-       /// normal channel operation. Note that this is normally generated through a call to
-       /// [`ChainMonitor::channel_monitor_updated`].
-       ///
-       /// Note that the update being processed here will not be replayed for you when you return a
-       /// [`MonitorEvent::UpdateCompleted`] event via [`Watch::release_pending_monitor_events`], so
-       /// you must store the update itself on your own local disk prior to returning a
-       /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
-       /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
-       /// reload-time.
+       /// submitting new commitment transactions to the counterparty. Once the update(s) which failed
+       /// have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
+       /// channel to an operational state.
+       ///
+       /// Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
+       /// If you return this error you must ensure that it is written to disk safely before writing
+       /// the latest [`ChannelManager`] state, or you should return [`PermanentFailure`] instead.
+       ///
+       /// Even when a channel has been "frozen", updates to the [`ChannelMonitor`] can continue to
+       /// occur (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us
+       /// attempting to claim it on this channel) and those updates must still be persisted.
+       ///
+       /// No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
+       /// until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
+       /// monitor update for the same channel.
        ///
        /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
        /// remote location (with local copies persisted immediately), it is anticipated that all
-       /// updates will return TemporaryFailure until the remote copies could be updated.
+       /// updates will return [`InProgress`] until the remote copies could be updated.
        ///
-       /// [`ChainMonitor::channel_monitor_updated`]: chainmonitor::ChainMonitor::channel_monitor_updated
-       TemporaryFailure,
-       /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
-       /// different watchtower and cannot update with all watchtowers that were previously informed
-       /// of this channel).
+       /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+       /// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
+       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+       InProgress,
+       /// Used to indicate no further channel monitor updates will be allowed (likely a disk failure
+       /// or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
        ///
-       /// At reception of this error, ChannelManager will force-close the channel and return at
-       /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
-       /// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel
-       /// update must be rejected.
+       /// When this is returned, [`ChannelManager`] will force-close the channel but *not* broadcast
+       /// our current commitment transaction. This avoids a dangerous case where a local disk failure
+       /// (e.g. the Linux-default remounting of the disk as read-only) causes [`PermanentFailure`]s
+       /// for all monitor updates. If we were to broadcast our latest commitment transaction and then
+       /// restart, we could end up reading a previous [`ChannelMonitor`] and [`ChannelManager`],
+       /// revoking our now-broadcasted state before seeing it confirm and losing all our funds.
        ///
-       /// This failure may also signal a failure to update the local persisted copy of one of
-       /// the channel monitor instance.
+       /// Note that this is somewhat of a tradeoff - if the disk is really gone and we may have lost
+       /// the data permanently, we really should broadcast immediately. If the data can be recovered
+       /// with manual intervention, we'd rather close the channel, rejecting future updates to it,
+       /// and broadcast the latest state only if we have HTLCs to claim which are timing out (which
+       /// we do as long as blocks are connected).
        ///
-       /// Note that even when you fail a holder commitment transaction update, you must store the
-       /// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor
-       /// broadcasts it (e.g distributed channel-monitor deployment)
+       /// In order to broadcast the latest local commitment transaction, you'll need to call
+       /// [`ChannelMonitor::get_latest_holder_commitment_txn`] and broadcast the resulting
+       /// transactions once you've safely ensured no further channel updates can be generated by your
+       /// [`ChannelManager`].
+       ///
+       /// Note that at least one final [`ChannelMonitorUpdate`] may still be provided, which must
+       /// still be processed by a running [`ChannelMonitor`]. This final update will mark the
+       /// [`ChannelMonitor`] as finalized, ensuring no further updates (e.g. revocation of the latest
+       /// commitment transaction) are allowed.
+       ///
+       /// Note that even if you return a [`PermanentFailure`] due to unavailability of secondary
+       /// [`ChannelMonitor`] copies, you should still make an attempt to store the update where
+       /// possible to ensure you can claim HTLC outputs on the latest commitment transaction
+       /// broadcasted later.
        ///
        /// In case of distributed watchtowers deployment, the new version must be written to disk, as
        /// state may have been stored but rejected due to a block forcing a commitment broadcast. This
        /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
        /// lagging behind on block processing.
+       ///
+       /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        PermanentFailure,
 }
 
@@ -267,10 +280,10 @@ pub enum ChannelMonitorUpdateErr {
 /// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
 /// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
 /// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
-/// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle
+/// funds in the channel. See [`ChannelMonitorUpdateStatus`] for more details about how to handle
 /// multiple instances.
 ///
-/// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
 pub trait Watch<ChannelSigner: Sign> {
        /// Watches a channel identified by `funding_txo` using `monitor`.
        ///
@@ -278,21 +291,21 @@ pub trait Watch<ChannelSigner: Sign> {
        /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
        /// calling [`block_connected`] and [`block_disconnected`] on the monitor.
        ///
-       /// Note: this interface MUST error with `ChannelMonitorUpdateErr::PermanentFailure` if
+       /// Note: this interface MUST return [`ChannelMonitorUpdateStatus::PermanentFailure`] if
        /// the given `funding_txo` has previously been registered via `watch_channel`.
        ///
        /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
        /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
        /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
 
        /// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
        ///
        /// Implementations must call [`update_monitor`] with the given update. See
-       /// [`ChannelMonitorUpdateErr`] for invariants around returning an error.
+       /// [`ChannelMonitorUpdateStatus`] for invariants around each possible return value.
        ///
        /// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor
-       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
+       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
 
        /// Returns any monitor events since the last call. Subsequent calls must only return new
        /// events.
@@ -302,7 +315,7 @@ pub trait Watch<ChannelSigner: Sign> {
        /// to disk.
        ///
        /// For details on asynchronous [`ChannelMonitor`] updating and returning
-       /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
+       /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
 }
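As a point of orientation, a hedged sketch of how an implementation might map its persistence outcomes onto the three status values this trait now returns; the `PersistOutcome` enum is hypothetical, not an LDK type:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

// Hypothetical persistence outcome for illustration only.
enum PersistOutcome {
    // Durably written before returning.
    Written,
    // Queued; completion must later be signaled via MonitorEvent::Completed.
    Queued,
    // Storage lost; manual intervention required before resuming operation.
    Lost,
}

fn status_for(outcome: PersistOutcome) -> ChannelMonitorUpdateStatus {
    match outcome {
        PersistOutcome::Written => ChannelMonitorUpdateStatus::Completed,
        PersistOutcome::Queued => ChannelMonitorUpdateStatus::InProgress,
        PersistOutcome::Lost => ChannelMonitorUpdateStatus::PermanentFailure,
    }
}
```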
 
@@ -321,9 +334,9 @@ pub trait Watch<ChannelSigner: Sign> {
 /// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
 /// should not block on I/O. Implementations should instead queue the newly monitored data to be
 /// processed later. Then, in order to block until the data has been processed, any [`Watch`]
-/// invocation that has called the `Filter` must return [`TemporaryFailure`].
+/// invocation that has called the `Filter` must return [`InProgress`].
 ///
-/// [`TemporaryFailure`]: ChannelMonitorUpdateErr::TemporaryFailure
+/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
 /// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
 /// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
 pub trait Filter {
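A minimal, hedged illustration of that reentrancy rule; the `queued` flag stands in for whatever bookkeeping a real implementation uses to know the `Filter` has not yet processed the new data:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

// If a Watch call handed new watch data to an asynchronous Filter, it cannot
// report the update as complete yet: it returns InProgress and surfaces a
// MonitorEvent::Completed once the Filter has caught up.
fn status_after_filter_registration(queued: bool) -> ChannelMonitorUpdateStatus {
    if queued {
        ChannelMonitorUpdateStatus::InProgress
    } else {
        ChannelMonitorUpdateStatus::Completed
    }
}
```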
index ced4ceee95577007e51da30e8f703942f8619e04..15d46b04688930d03525b6347d6b714a28713b28 100644 (file)
@@ -7,7 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
+//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
 //! monitor updates.
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
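Most tests in this file follow one pattern, sketched here using the same names the tests below use: force the persister to report an asynchronous persist, exercise the node while the channel is paused, then complete the update through the test `ChainMonitor`:

```rust
// Report async persistence, pausing the channel on the next monitor update.
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// ... drive the node; outbound messages are held back while paused ...

// Let future persists succeed, then mark the pending update as completed.
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id
    .lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
```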
@@ -18,7 +18,7 @@ use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use chain::transaction::OutPoint;
-use chain::{ChannelMonitorUpdateErr, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
 use ln::channel::AnnouncementSigsState;
 use ln::msgs;
@@ -50,7 +50,7 @@ fn test_simple_monitor_permanent_update_fail() {
        create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
        let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
        unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
        check_added_monitors!(nodes[0], 2);
 
@@ -65,6 +65,8 @@ fn test_simple_monitor_permanent_update_fail() {
                _ => panic!("Unexpected event"),
        };
 
+       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
        // TODO: Once we hit the chain with the failure transaction we should check that we get a
        // PaymentPathFailed event
 
@@ -114,7 +116,7 @@ fn test_monitor_and_persister_update_fail() {
                        &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                chain_mon
        };
        let header = BlockHeader {
@@ -127,8 +129,8 @@ fn test_monitor_and_persister_update_fail() {
        };
        chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
 
-       // Set the persister's return value to be a TemporaryFailure.
-       persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       // Set the persister's return value to be InProgress.
+       persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        // Try to update ChannelMonitor
        nodes[1].node.claim_funds(preimage);
@@ -140,12 +142,12 @@ fn test_monitor_and_persister_update_fail() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
                if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Check that even though the persister is returning a TemporaryFailure,
+                       // Check that even though the persister is returning InProgress,
                        // because the update is bogus, ultimately the error that's returned
                        // should be a PermanentFailure.
-                       if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
-                       logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1);
-                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
+                       logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        } else { assert!(false); };
 
@@ -165,10 +167,10 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
        let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        {
-               unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
+               unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateInProgress, {});
                check_added_monitors!(nodes[0], 1);
        }
 
@@ -182,7 +184,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
                reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -218,8 +220,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Now set it to failed again...
        let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
        {
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-               unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateInProgress, {});
                check_added_monitors!(nodes[0], 1);
        }
 
@@ -259,8 +261,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // * First we route a payment, then get a temporary monitor update failure when trying to
        //   route a second payment. We then claim the first payment.
        // * If disconnect_count is set, we will disconnect at this point (which is likely as
-       //   TemporaryFailure likely indicates net disconnect which resulted in failing to update
-       //   the ChannelMonitor on a watchtower).
+       //   InProgress likely indicates net disconnect which resulted in failing to update the
+       //   ChannelMonitor on a watchtower).
        // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
        //   immediately, otherwise we wait disconnect and deliver them via the reconnect
        //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
@@ -282,8 +284,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        {
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-               unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateInProgress, {});
                check_added_monitors!(nodes[0], 1);
        }
 
@@ -337,7 +339,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        }
 
        // Now fix monitor updating...
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -629,14 +631,14 @@ fn test_monitor_update_fail_cs() {
        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -660,7 +662,7 @@ fn test_monitor_update_fail_cs() {
                        assert!(updates.update_fee.is_none());
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
-                       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+                       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -670,7 +672,7 @@ fn test_monitor_update_fail_cs() {
                _ => panic!("Unexpected event"),
        }
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -722,7 +724,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -730,7 +732,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -779,7 +781,7 @@ fn test_monitor_update_raa_while_paused() {
        check_added_monitors!(nodes[1], 1);
        let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -788,10 +790,10 @@ fn test_monitor_update_raa_while_paused() {
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
+       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -871,7 +873,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now fail monitor updating.
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -887,7 +889,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                check_added_monitors!(nodes[0], 1);
        }
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
        send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
@@ -917,7 +919,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Restore monitor updating, ensuring we immediately get a fail-back update and a
        // update_add update.
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1121,7 +1123,7 @@ fn test_monitor_update_fail_reestablish() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
 
@@ -1158,7 +1160,7 @@ fn test_monitor_update_fail_reestablish() {
                get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
                        .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1223,7 +1225,7 @@ fn raa_no_response_awaiting_raa_state() {
        // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
        // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
        // then restore channel monitor updates.
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1232,10 +1234,10 @@ fn raa_no_response_awaiting_raa_state() {
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        // nodes[1] should be AwaitingRAA here!
@@ -1326,7 +1328,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
        // update.
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
@@ -1353,7 +1355,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now un-fail the monitor, which will result in B sending its original commitment update,
        // receiving the commitment update from A, and the resulting commitment dances.
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1432,7 +1434,7 @@ fn monitor_failed_no_reestablish_response() {
                check_added_monitors!(nodes[0], 1);
        }
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -1458,7 +1460,7 @@ fn monitor_failed_no_reestablish_response() {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
        let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1532,7 +1534,7 @@ fn first_message_on_recv_ordering() {
        let payment_event = SendEvent::from_event(events.pop().unwrap());
        assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        // Deliver the final RAA for the first payment, which does not require a response. RAAs
        // generally require a commitment_signed, so the fact that we're expecting an opposite response
@@ -1551,7 +1553,7 @@ fn first_message_on_recv_ordering() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1595,7 +1597,7 @@ fn test_monitor_update_fail_claim() {
 
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
@@ -1614,7 +1616,7 @@ fn test_monitor_update_fail_claim() {
 
        // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
        // paused, so forward shouldn't succeed until we call channel_monitor_updated().
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let mut events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -1725,13 +1727,13 @@ fn test_monitor_update_on_pending_forwards() {
        nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1785,7 +1787,7 @@ fn monitor_update_claim_fail_no_response() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
@@ -1794,7 +1796,7 @@ fn monitor_update_claim_fail_no_response() {
        assert_eq!(events.len(), 0);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1832,19 +1834,19 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
        let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -1884,7 +1886,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        }
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1956,19 +1958,19 @@ fn test_path_paused_mpp() {
 
        // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
        // (for the path 0 -> 2 -> 3) fails.
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-       chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress));
 
        // Now check that we get the right return value, indicating that the first path succeeded but
-       // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
-       // some paths succeeded, preventing retry.
+       // the second got a MonitorUpdateInProgress err. This implies
+       // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
        if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) {
                assert_eq!(results.len(), 2);
                if let Ok(()) = results[0] {} else { panic!(); }
-               if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
+               if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
        } else { panic!(); }
        check_added_monitors!(nodes[0], 2);
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        // Pass the first HTLC of the payment along to nodes[3].
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -2265,7 +2267,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
        check_added_monitors!(nodes[0], 0);
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.claim_funds(payment_preimage_0);
        check_added_monitors!(nodes[0], 1);
        expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
@@ -2315,7 +2317,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                        nodes[0].node = &nodes_0_deserialized;
                        assert!(nodes_0_read.is_empty());
 
-                       nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+                       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
+                               ChannelMonitorUpdateStatus::Completed);
                        check_added_monitors!(nodes[0], 1);
                } else {
                        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
@@ -2359,7 +2362,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 
        // If we finish updating the monitor, we should free the holding cell right away (this did
        // not occur prior to #756).
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
 
@@ -2558,8 +2561,8 @@ fn test_temporary_error_during_shutdown() {
 
        let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
@@ -2570,8 +2573,8 @@ fn test_temporary_error_during_shutdown() {
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
@@ -2579,7 +2582,7 @@ fn test_temporary_error_during_shutdown() {
 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 
@@ -2612,7 +2615,7 @@ fn test_permanent_error_during_sending_shutdown() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        check_closed_broadcast!(nodes[0], true);
@@ -2633,7 +2636,7 @@ fn test_permanent_error_during_handling_shutdown() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -2656,20 +2659,20 @@ fn double_temp_error() {
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
        let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // `claim_funds` results in a ChannelMonitorUpdate.
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
        // which had some asserts that prevented it from being called twice.
        nodes[1].node.claim_funds(payment_preimage_2);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
index 55568f3d6c57cdeb4121cf58b2947a57405bef92..48a701e3b04fb712772a3a9a293536e839ee6de4 100644 (file)
@@ -3657,13 +3657,16 @@ impl<Signer: Sign> Channel<Signer> {
                log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
        }
 
-       /// Indicates that a ChannelMonitor update failed to be stored by the client and further
-       /// updates are partially paused.
-       /// This must be called immediately after the call which generated the ChannelMonitor update
-       /// which failed. The messages which were generated from that call which generated the
-       /// monitor update failure must *not* have been sent to the remote end, and must instead
-       /// have been dropped. They will be regenerated when monitor_updating_restored is called.
-       pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool,
+       /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
+       /// This must be called immediately after the [`chain::Watch`] call which returned
+       /// [`ChannelMonitorUpdateStatus::InProgress`].
+       /// The messages which were generated with the monitor update must *not* have been sent to the
+       /// remote end, and must instead have been dropped. They will be regenerated when
+       /// [`Self::monitor_updating_restored`] is called.
+       ///
+       /// [`chain::Watch`]: crate::chain::Watch
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
+       pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
                resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
index 8c12f5470b9ca0fd36f2e5410efeece305c99d71..f234ad9f91f555075804d87db115d98193f09619 100644 (file)
@@ -35,7 +35,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::{LockTime, secp256k1, Sequence};
 
 use chain;
-use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
+use chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
@@ -1212,13 +1212,13 @@ pub enum PaymentSendFailure {
        /// in over-/re-payment.
        ///
        /// The results here are ordered the same as the paths in the route object which was passed to
-       /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
-       /// retried (though there is currently no API with which to do so).
+       /// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
+       /// safely retried via [`ChannelManager::retry_payment`].
        ///
-       /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
-       /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
-       /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
-       /// with the latest update_id.
+       /// Any entries which contain `Err(APIError::MonitorUpdateInProgress)` or `Ok(())` MUST NOT be
+       /// retried as they will result in over-/re-payment. These HTLCs have all either been
+       /// successfully sent (in the case of `Ok(())`) or will be sent once a
+       /// [`MonitorEvent::Completed`] is provided for the next-hop channel with the latest update_id.
        PartialFailure {
                /// The errors themselves, in the same order as the route hops.
                results: Vec<Result<(), APIError>>,
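A hedged sketch of applying these rules, treating only entries other than `Ok(())` and `MonitorUpdateInProgress` as retryable:

```rust
use lightning::ln::channelmanager::PaymentSendFailure;
use lightning::util::errors::APIError;

// Returns true if at least one path failed in a way the docs above permit
// retrying; Ok(()) and MonitorUpdateInProgress entries MUST NOT be retried.
fn has_retryable_path(send_result: &Result<(), PaymentSendFailure>) -> bool {
    match send_result {
        Err(PaymentSendFailure::PartialFailure { results, .. }) => {
            results.iter().any(|res| !matches!(
                res, Ok(()) | Err(APIError::MonitorUpdateInProgress)))
        },
        _ => false,
    }
}
```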
@@ -1375,11 +1375,11 @@ macro_rules! remove_channel {
        }
 }
 
-macro_rules! handle_monitor_err {
+macro_rules! handle_monitor_update_res {
        ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
-                       ChannelMonitorUpdateErr::PermanentFailure => {
-                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
                                update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
                                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
                                // chain in a confused state! We need to move them into the ChannelMonitor which
@@ -1391,11 +1391,11 @@ macro_rules! handle_monitor_err {
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
                                let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
-                                               $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
+                                               $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
-                       ChannelMonitorUpdateErr::TemporaryFailure => {
-                               log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
                                                log_bytes!($chan_id[..]),
                                                if $resend_commitment && $resend_raa {
                                                                match $action_type {
@@ -1414,13 +1414,16 @@ macro_rules! handle_monitor_err {
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
+                               $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
+                       ChannelMonitorUpdateStatus::Completed => {
+                               (Ok(()), false)
+                       },
                }
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+               let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
@@ -1428,41 +1431,20 @@ macro_rules! handle_monitor_err {
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
                debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
-       };
-}
-
-macro_rules! return_monitor_err {
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-       }
-}
-
-// Does not break in case of TemporaryFailure!
-macro_rules! maybe_break_monitor_err {
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
-                       (e, ChannelMonitorUpdateErr::PermanentFailure) => {
-                               break e;
-                       },
-                       (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
-               }
-       }
 }
 
 macro_rules! send_channel_ready {
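In effect the renamed macro maps each status onto one of three outcomes; a hedged, simplified restatement (the real macro also tracks which messages to resend and which HTLCs to fail):

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

fn outcome(status: ChannelMonitorUpdateStatus) -> &'static str {
    match status {
        // Persisted synchronously: carry on as a plain success.
        ChannelMonitorUpdateStatus::Completed => "proceed",
        // Persisting asynchronously: pause the channel and regenerate the
        // dropped messages once the update completes.
        ChannelMonitorUpdateStatus::InProgress => "pause channel",
        // Unrecoverable: close the channel, and per this commit do so with
        // force_shutdown(false), i.e. without broadcasting the commitment tx.
        ChannelMonitorUpdateStatus::PermanentFailure => "close channel",
    }
}
```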
@@ -1539,15 +1521,18 @@ macro_rules! handle_chan_restoration_locked {
                                // only case where we can get a new ChannelMonitorUpdate would be if we also
                                // have some commitment updates to send as well.
                                assert!($commitment_update.is_some());
-                               if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                       // channel_reestablish doesn't guarantee the order it returns is sensical
-                                       // for the messages it returns, but if we're setting what messages to
-                                       // re-transmit on monitor update success, we need to make sure it is sane.
-                                       let mut order = $order;
-                                       if $raa.is_none() {
-                                               order = RAACommitmentOrder::CommitmentFirst;
+                               match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                       ChannelMonitorUpdateStatus::Completed => {},
+                                       e => {
+                                               // channel_reestablish doesn't guarantee that the order of the
+                                               // messages it returns is sensible, but if we're setting which
+                                               // messages to re-transmit on monitor update success, we need to
+                                               // make sure the order is sane.
+                                               let mut order = $order;
+                                               if $raa.is_none() {
+                                                       order = RAACommitmentOrder::CommitmentFirst;
+                                               }
+                                               break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
                                        }
-                                       break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
                                }
                        }
 
@@ -1898,13 +1883,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, channel_state, chan_entry);
-                                                               break result;
-                                                       }
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, channel_state, chan_entry);
+                                                       break result;
                                                }
                                        }
 
@@ -2520,19 +2504,30 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        channel_state, chan)
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
-                                                       // Note that MonitorUpdateFailed here indicates (per function docs)
-                                                       // that we will resend the commitment update once monitor updating
-                                                       // is restored. Therefore, we must return an error indicating that
-                                                       // it is unsafe to retry the payment wholesale, which we do in the
-                                                       // send_payment check for MonitorUpdateFailed, below.
-                                                       insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
-                                                       return Err(APIError::MonitorUpdateFailed);
+                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                               let chan_id = chan.get().channel_id();
+                                               match (update_err,
+                                                       handle_monitor_update_res!(self, update_err, channel_state, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false, true))
+                                               {
+                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {
+                                                               insert_outbound_payment!();
+                                                       },
+                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+                                                               // Note that MonitorUpdateInProgress here indicates (per function
+                                                               // docs) that we will resend the commitment update once monitor
+                                                               // updating completes. Therefore, we must return an error
+                                                               // indicating that it is unsafe to retry the payment wholesale,
+                                                               // which we do in the send_payment check for
+                                                               // MonitorUpdateInProgress, below.
+                                                               insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
+                                                               return Err(APIError::MonitorUpdateInProgress);
+                                                       },
+                                                       _ => unreachable!(),
                                                }
-                                               insert_outbound_payment!();
 
-                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
+                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: path.first().unwrap().pubkey,
                                                        updates: msgs::CommitmentUpdate {
@@ -2578,12 +2573,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// PaymentSendFailure for more info.
        ///
        /// In general, a path may raise:
-       ///  * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
+       ///  * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee,
        ///    node public key) is specified.
-       ///  * APIError::ChannelUnavailable if the next-hop channel is not available for updates
+       ///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
        ///    (including due to previous monitor update failure or new permanent monitor update
        ///    failure).
-       ///  * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+       ///  * [`APIError::MonitorUpdateInProgress`] if a still-in-progress monitor update prevented sending the
        ///    relevant updates.
        ///
        /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
@@ -2647,8 +2642,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                for (res, path) in results.iter().zip(route.paths.iter()) {
                        if res.is_ok() { has_ok = true; }
                        if res.is_err() { has_err = true; }
-                       if let &Err(APIError::MonitorUpdateFailed) = res {
-                               // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
+                       if let &Err(APIError::MonitorUpdateInProgress) = res {
+                               // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a
                                // PartialFailure.
                                has_err = true;
                                has_ok = true;
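For callers, the visible behavior change is summed up above: an InProgress result counts as both a success and a failure, yielding a PartialFailure. A hedged sketch of caller-side handling (send_payment's exact signature and PaymentSendFailure's fields are assumed from this era of the API):

	match channel_manager.send_payment(&route, payment_hash, &Some(payment_secret)) {
		Ok(_payment_id) => {},
		Err(PaymentSendFailure::PartialFailure { results, .. }) => {
			for res in results.iter() {
				if let Err(APIError::MonitorUpdateInProgress) = res {
					// This path's HTLC may still go out once the monitor
					// update completes, so retrying it wholesale is unsafe.
				}
			}
		},
		Err(_e) => {}, // e.g. RouteError or ChannelUnavailable
	}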
@@ -3250,9 +3245,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        continue;
                                                                }
                                                        };
-                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                               handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
-                                                               continue;
+                                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {},
+                                                               e => {
+                                                                       handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+                                                                       continue;
+                                                               }
                                                        }
                                                        log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
                                                                add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
@@ -3521,23 +3519,26 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                let ret_err = match res {
                        Ok(Some((update_fee, commitment_signed, monitor_update))) => {
-                               if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                       let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
-                                       if drop { retain_channel = false; }
-                                       res
-                               } else {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: chan.get_counterparty_node_id(),
-                                               updates: msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs: Vec::new(),
-                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                       update_fee: Some(update_fee),
-                                                       commitment_signed,
-                                               },
-                                       });
-                                       Ok(())
+                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                       ChannelMonitorUpdateStatus::Completed => {
+                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id: chan.get_counterparty_node_id(),
+                                                       updates: msgs::CommitmentUpdate {
+                                                               update_add_htlcs: Vec::new(),
+                                                               update_fulfill_htlcs: Vec::new(),
+                                                               update_fail_htlcs: Vec::new(),
+                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                               update_fee: Some(update_fee),
+                                                               commitment_signed,
+                                                       },
+                                               });
+                                               Ok(())
+                                       },
+                                       e => {
+                                               let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+                                               if drop { retain_channel = false; }
+                                               res
+                                       }
                                }
                        },
                        Ok(None) => Ok(()),
@@ -4126,15 +4127,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
                                        if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
-                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
-                                                               payment_preimage, e);
-                                                       return ClaimFundsFromHop::MonitorUpdateFail(
-                                                               chan.get().get_counterparty_node_id(),
-                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-                                                               Some(htlc_value_msat)
-                                                       );
+                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       ChannelMonitorUpdateStatus::Completed => {},
+                                                       e => {
+                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+                                                                       "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                                       payment_preimage, e);
+                                                               return ClaimFundsFromHop::MonitorUpdateFail(
+                                                                       chan.get().get_counterparty_node_id(),
+                                                                       handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                                       Some(htlc_value_msat)
+                                                               );
+                                                       }
                                                }
                                                if let Some((msg, commitment_signed)) = msgs {
                                                        log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -4157,10 +4161,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                },
                                Err((e, monitor_update)) => {
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
-                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                                       payment_preimage, e);
+                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+                                                               "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                               payment_preimage, e);
+                                               },
                                        }
                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
                                        let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
@@ -4267,9 +4274,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // We update the ChannelMonitor on the backward link, after
                                        // receiving an offchain preimage event from the forward link (the
                                        // event being update_fulfill_htlc).
-                                       if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+                                       let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                                               // TODO: This needs to be handled somehow - if we receive a monitor update
+                                               // with a preimage we *must* somehow manage to propagate it to the upstream
+                                               // channel, or we must have an ability to receive the same event and try
+                                               // again on restart.
                                                log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                                                        payment_preimage, e);
+                                                       payment_preimage, update_res);
                                        }
                                        // Note that we do *not* set `claimed_htlc` to false here. In fact, this
                                        // totally could be a duplicate claim, but we have no way of knowing
@@ -4534,29 +4546,28 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
                // lock before watch_channel
-               if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
-                       match e {
-                               ChannelMonitorUpdateErr::PermanentFailure => {
-                                       // Note that we reply with the new channel_id in error messages if we gave up on the
-                                       // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                                       // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                                       // any messages referencing a previously-closed channel anyway.
-                                       // We do not do a force-close here as that would generate a monitor update for
-                                       // a monitor that we didn't manage to store (and that we don't care about - we
-                                       // don't respond with the funding_signed so the channel can never go on chain).
-                                       let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
-                                       assert!(failed_htlcs.is_empty());
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
-                               },
-                               ChannelMonitorUpdateErr::TemporaryFailure => {
-                                       // There's no problem signing a counterparty's funding transaction if our monitor
-                                       // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                                       // accepted payment from yet. We do, however, need to wait to send our channel_ready
-                                       // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
-                                       channel_ready = None; // Don't send the channel_ready now
-                               },
-                       }
+               match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
+                       ChannelMonitorUpdateStatus::Completed => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               // Note that we reply with the new channel_id in error messages if we gave up on the
+                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                               // any messages referencing a previously-closed channel anyway.
+                               // We do not propagate the monitor update to the user as it would be for a monitor
+                               // that we didn't manage to store (and that we don't care about - we don't respond
+                               // with the funding_signed so the channel can never go on chain).
+                               let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
+                               assert!(failed_htlcs.is_empty());
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
+                       },
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               // There's no problem signing a counterparty's funding transaction if our monitor
+                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
+                               // until we have persisted our monitor.
+                               chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+                               channel_ready = None; // Don't send the channel_ready now
+                       },
                }
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
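The InProgress arm above is what makes asynchronous monitor persistence a first-class flow: a Persist implementation may return InProgress from its persistence methods and finish the write in the background. A rough sketch, assuming a hypothetical AsyncStore with a queue_write helper (method shapes simplified from this era's Persist trait):

	impl<ChannelSigner: Sign> Persist<ChannelSigner> for AsyncStore {
		fn persist_new_channel(&self, funding_txo: OutPoint,
			monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
		) -> ChannelMonitorUpdateStatus {
			// Hand the serialized monitor to a background writer. When the
			// write is durable we must notify the ChainMonitor (see the
			// MonitorEvent::Completed hunk below) so LDK resumes the channel.
			self.queue_write(funding_txo, monitor.encode(), update_id);
			ChannelMonitorUpdateStatus::InProgress
		}
		fn update_persisted_channel(&self, funding_txo: OutPoint,
			_update: &Option<ChannelMonitorUpdate>,
			monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
		) -> ChannelMonitorUpdateStatus {
			self.queue_write(funding_txo, monitor.encode(), update_id);
			ChannelMonitorUpdateStatus::InProgress
		}
	}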
@@ -4603,17 +4614,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
-                                       if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                       // monitor update contained within `shutdown_finish` was applied.
-                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                               shutdown_finish.0.take();
+                                       match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                                       if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                               // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                               // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                               // monitor update contained within `shutdown_finish` was applied.
+                                                               if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                                       shutdown_finish.0.take();
+                                                               }
                                                        }
-                                               }
-                                               return res
+                                                       return res
+                                               },
                                        }
                                        if let Some(msg) = channel_ready {
                                                send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
@@ -4687,13 +4701,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, channel_state, chan_entry);
-                                                               break result;
-                                                       }
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, channel_state, chan_entry);
+                                                       break result;
                                                }
                                        }
 
@@ -4882,9 +4895,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                },
                                                Ok(res) => res
                                        };
-                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                       return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
+                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                               if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+                                       return Err(e);
                                }
+
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                                        node_id: counterparty_node_id.clone(),
                                        msg: revoke_and_ack,
@@ -4956,26 +4971,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
                                        let raa_updates = break_chan_entry!(self,
                                                chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
                                        htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
-                                               if was_frozen_for_monitor {
-                                                       assert!(raa_updates.commitment_update.is_none());
-                                                       assert!(raa_updates.accepted_htlcs.is_empty());
-                                                       assert!(raa_updates.failed_htlcs.is_empty());
-                                                       assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                                       break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
-                                               } else {
-                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
-                                                                       RAACommitmentOrder::CommitmentFirst, false,
-                                                                       raa_updates.commitment_update.is_some(), false,
-                                                                       raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                                       raa_updates.finalized_claimed_htlcs) {
-                                                               break Err(e);
-                                                       } else { unreachable!(); }
-                                               }
+                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
+                                       if was_paused_for_mon_update {
+                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+                                               assert!(raa_updates.commitment_update.is_none());
+                                               assert!(raa_updates.accepted_htlcs.is_empty());
+                                               assert!(raa_updates.failed_htlcs.is_empty());
+                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+                                       }
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                                               if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false,
+                                                               raa_updates.commitment_update.is_some(), false,
+                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                               raa_updates.finalized_claimed_htlcs) {
+                                                       break Err(e);
+                                               } else { unreachable!(); }
                                        }
                                        if let Some(updates) = raa_updates.commitment_update {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -5188,7 +5204,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        });
                                                }
                                        },
-                                       MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+                                       MonitorEvent::Completed { funding_txo, monitor_update_id } => {
                                                self.channel_monitor_updated(&funding_txo, monitor_update_id);
                                        },
                                }
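The MonitorEvent rename above (UpdateCompleted to Completed) is the other half of the async flow: once a queued write lands, the persister reports back to the ChainMonitor, which emits Completed, and channel_monitor_updated resumes the paused channel. A sketch of the completion side (pending_completions is a hypothetical stand-in for the persister's bookkeeping):

	// Background thread: report each durable write back to the ChainMonitor.
	for (funding_txo, update_id) in pending_completions.drain(..) {
		chain_monitor.channel_monitor_updated(funding_txo, update_id)
			.expect("completed an update for an unknown channel");
	}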
@@ -5240,16 +5256,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        ));
                                                }
                                                if let Some((commitment_update, monitor_update)) = commitment_opt {
-                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                                               has_monitor_update = true;
-                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
-                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
-                                                               if close_channel { return false; }
-                                                       } else {
-                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                       node_id: chan.get_counterparty_node_id(),
-                                                                       updates: commitment_update,
-                                                               });
+                                                       match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {
+                                                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                               node_id: chan.get_counterparty_node_id(),
+                                                                               updates: commitment_update,
+                                                                       });
+                                                               },
+                                                               e => {
+                                                                       has_monitor_update = true;
+                                                                       let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+                                                                       handle_errors.push((chan.get_counterparty_node_id(), res));
+                                                                       if close_channel { return false; }
+                                                               },
                                                        }
                                                }
                                                true
index 20e51ba2c8397199e26234f115da8553adecdf55..43dfef5d188bc51d8d0aa65df6965ef9ccaf7b1a 100644 (file)
@@ -10,7 +10,7 @@
 //! A bunch of useful utilities for building networks of nodes and exchanging messages between
 //! nodes for functional tests.
 
-use chain::{BestBlock, Confirm, Listen, Watch, keysinterface::KeysInterface};
+use chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, keysinterface::KeysInterface};
 use chain::channelmonitor::ChannelMonitor;
 use chain::transaction::OutPoint;
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
@@ -393,7 +393,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
-                               if let Err(_) = chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
+                               if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != ChannelMonitorUpdateStatus::Completed {
                                        panic!();
                                }
                        }
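For reference, the tests now compare against an enum because the chain::Watch methods no longer return a Result; after this change the trait looks roughly like this (simplified, remaining items unchanged):

	pub trait Watch<ChannelSigner: Sign> {
		fn watch_channel(&self, funding_txo: OutPoint,
			monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
		fn update_channel(&self, funding_txo: OutPoint,
			update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
		// release_pending_monitor_events() is unchanged.
	}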
index b0b51bfe8628c20db50d5cf247055bb0abb0df1a..e5378e8ff0f85828b2c705870ac48aaf64dc0789 100644 (file)
@@ -12,7 +12,7 @@
 //! claim outputs on-chain.
 
 use chain;
-use chain::{Confirm, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use chain::chaininterface::LowerBoundedFeeEstimator;
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
@@ -2335,7 +2335,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[3].node.list_channels().len(), 0);
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
-       nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon).unwrap();
+       assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
+               ChannelMonitorUpdateStatus::Completed);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
        check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
 }
@@ -4021,7 +4022,8 @@ fn test_funding_peer_disconnect() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -4389,7 +4391,8 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        check_added_monitors!(nodes[0], 1);
@@ -4501,7 +4504,8 @@ fn test_manager_serialize_deserialize_events() {
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
 
        // After deserializing, make sure the funding_transaction is still held by the channel manager
@@ -4585,7 +4589,8 @@ fn test_simple_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -4697,7 +4702,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        for monitor in node_0_monitors.drain(..) {
-               assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
+               assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+                       ChannelMonitorUpdateStatus::Completed);
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
@@ -7533,7 +7539,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
                }).unwrap().1
        };
        nodes[0].node = &node_state_0;
-       assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+       assert_eq!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].chain_monitor = &monitor;
        nodes[0].chain_source = &chain_source;
 
@@ -8826,7 +8833,8 @@ fn test_bad_secret_hash() {
 fn test_update_err_monitor_lockdown() {
        // Our monitor will lock updates of the local commitment transaction if a broadcast condition
        // has been fulfilled (either force-close from Channel or block height requiring a HTLC-
-       // timeout). Trying to update monitor after lockdown should return a ChannelMonitorUpdateErr.
+       // timeout). Trying to update the monitor after lockdown should return
+       // ChannelMonitorUpdateStatus::PermanentFailure.
        //
        // This scenario may happen in a watchtower setup, where the watchtower processes a block height
        // triggering a timeout while a slow-block-processing ChannelManager receives a locally-signed
@@ -8859,7 +8867,7 @@ fn test_update_err_monitor_lockdown() {
                                &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
@@ -8879,8 +8887,8 @@ fn test_update_err_monitor_lockdown() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       if let Err(_) =  watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       assert_eq!(watchtower.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure);
+                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't yet processed the timeout
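The assertion pair above captures the watchtower semantics this test (and test_concurrent_monitor_claim below) exercises: a monitor that has already met a broadcast condition refuses further off-chain updates with PermanentFailure, while an in-sync monitor accepts the same update with Completed. A conceptual restatement (monitor_has_broadcast_condition is a hypothetical predicate, not an LDK API):

	// Once a ChannelMonitor has queued a holder-commitment broadcast,
	// further off-chain updates must be refused:
	let status = if monitor_has_broadcast_condition {
		ChannelMonitorUpdateStatus::PermanentFailure // channel is on-chain-bound
	} else {
		ChannelMonitorUpdateStatus::Completed // still in sync, accept the update
	};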
@@ -8923,7 +8931,7 @@ fn test_concurrent_monitor_claim() {
                                &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
@@ -8952,7 +8960,7 @@ fn test_concurrent_monitor_claim() {
                                &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
@@ -8971,9 +8979,9 @@ fn test_concurrent_monitor_claim() {
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Watchtower Alice should already have seen the block and reject the update
-                       if let Err(_) =  watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure);
+                       assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::Completed);
+                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't yet processed the timeout
@@ -9746,8 +9754,10 @@ fn test_forwardable_regen() {
        nodes_1_deserialized = nodes_1_deserialized_tmp;
        assert!(nodes_1_read.is_empty());
 
-       assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
-       assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok());
+       assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
+       assert_eq!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[1].node = &nodes_1_deserialized;
        check_added_monitors!(nodes[1], 2);
 
@@ -10211,7 +10221,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        for monitor in monitors {
                // On startup the preimage should have been copied into the non-persisted monitor:
                assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
-               nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+               assert_eq!(nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor),
+                       ChannelMonitorUpdateStatus::Completed);
        }
        check_added_monitors!(nodes[3], 2);
 
index 02a3185e1509778ba13d38daa9d205fcbe07c3f8..18c8e3803521494b86e49e1e063f76f84b87d09c 100644 (file)
@@ -11,7 +11,7 @@
 //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
 //! payments thereafter.
 
-use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
@@ -436,7 +436,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -643,10 +644,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
                        $chan_manager = nodes_0_deserialized_tmp;
                        assert!(nodes_0_read.is_empty());
 
-                       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+                       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+                               ChannelMonitorUpdateStatus::Completed);
                        if !chan_1_monitor_serialized.0.is_empty() {
                                let funding_txo = chan_1_monitor.as_ref().unwrap().get_funding_txo().0;
-                               assert!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()).is_ok());
+                               assert_eq!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()),
+                                       ChannelMonitorUpdateStatus::Completed);
                        }
                        nodes[0].node = &$chan_manager;
                        check_added_monitors!(nodes[0], if !chan_1_monitor_serialized.0.is_empty() { 2 } else { 1 });
@@ -853,10 +856,10 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        }
 
        // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
-       // returning TemporaryFailure. This should cause the claim event to never make its way to the
+       // returning InProgress. This should cause the claim event to never make its way to the
        // ChannelManager.
        chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        if payment_timeout {
                connect_blocks(&nodes[0], 1);
@@ -881,7 +884,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
        // payment sent event.
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
        for update in mon_updates {
@@ -925,7 +928,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        };
        nodes_0_deserialized = nodes_0_deserialized_tmp;
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        check_added_monitors!(nodes[0], 1);
        nodes[0].node = &nodes_0_deserialized;
 
@@ -1015,7 +1019,8 @@ fn test_fulfill_restart_failure() {
        };
        nodes_1_deserialized = nodes_1_deserialized_tmp;
 
-       assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        check_added_monitors!(nodes[1], 1);
        nodes[1].node = &nodes_1_deserialized;
 
index db8c11e4c1eab56a39858375447e8a9f30593291..7a5b62e00d55226bbccc6bde57dfd3a395a398a9 100644 (file)
@@ -11,7 +11,7 @@
 //! other behavior that exists only on private channels or with a semi-trusted counterparty (eg
 //! LSP).
 
-use chain::{ChannelMonitorUpdateErr, Watch};
+use chain::{ChannelMonitorUpdateStatus, Watch};
 use chain::channelmonitor::ChannelMonitor;
 use chain::keysinterface::{Recipient, KeysInterface};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
@@ -137,8 +137,10 @@ fn test_priv_forwarding_rejection() {
        assert!(nodes_1_read.is_empty());
        nodes_1_deserialized = nodes_1_deserialized_tmp;
 
-       assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
-       assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
+       assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a),
+               ChannelMonitorUpdateStatus::Completed);
+       assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b),
+               ChannelMonitorUpdateStatus::Completed);
        check_added_monitors!(nodes[1], 2);
        nodes[1].node = &nodes_1_deserialized;
 
@@ -624,7 +626,7 @@ fn test_0conf_channel_with_async_monitor() {
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
        let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -634,7 +636,7 @@ fn test_0conf_channel_with_async_monitor() {
 
        let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(bs_signed_locked.len(), 2);
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        match &bs_signed_locked[0] {
                MessageSendEvent::SendFundingSigned { node_id, msg } => {
@@ -680,8 +682,8 @@ fn test_0conf_channel_with_async_monitor() {
                _ => panic!("Unexpected event"),
        };
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_channel_update);
        nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_channel_update);
@@ -710,12 +712,12 @@ fn test_0conf_channel_with_async_monitor() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
        check_added_monitors!(nodes[0], 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
        check_added_monitors!(nodes[1], 0);
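
To summarize the async-persistence pattern this test exercises: flag the persister to return InProgress, deliver a message, observe that responses are withheld, then flag Completed and call channel_monitor_updated to release them. A condensed sketch using the harness names above (raa and channel_id stand in for the concrete message and channel id in the test):

	// Make the next persist call report InProgress rather than completing.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
	// While the update is pending, outbound messages are held back.
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Signal completion so the held messages are released.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id
		.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
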
index 507dc34dfbe44a66a9505adb7e7aba60fca3951c..cdda13183d2b4eb50b2836a61e69bddb0070b134 100644 (file)
@@ -11,7 +11,7 @@
 
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use chain::transaction::OutPoint;
-use chain::{Confirm, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs};
 use ln::msgs::ChannelMessageHandler;
 use util::enforcing_trait_impls::EnforcingSigner;
@@ -342,7 +342,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
                }
 
-               nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+               assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
+                       ChannelMonitorUpdateStatus::Completed);
                check_added_monitors!(nodes[0], 1);
        }
 
index 820bf31c6e090931634b2c58be1f6254060d7c98..ad699354233cb705fd41221cee58293a3a62c3d2 100644 (file)
@@ -46,9 +46,15 @@ pub enum APIError {
                /// A human-readable error message
                err: String
        },
-       /// An attempt to call watch/update_channel returned an Err (ie you did this!), causing the
-       /// attempted action to fail.
-       MonitorUpdateFailed,
+       /// An attempt to call [`chain::Watch::watch_channel`]/[`chain::Watch::update_channel`]
+       /// returned a [`ChannelMonitorUpdateStatus::InProgress`] indicating the persistence of a
+       /// monitor update is awaiting async resolution. Once it resolves, the attempted action
+       /// should complete automatically.
+       ///
+       /// [`chain::Watch::watch_channel`]: crate::chain::Watch::watch_channel
+       /// [`chain::Watch::update_channel`]: crate::chain::Watch::update_channel
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
+       MonitorUpdateInProgress,
        /// [`KeysInterface::get_shutdown_scriptpubkey`] returned a shutdown scriptpubkey incompatible
        /// with the channel counterparty as negotiated in [`InitFeatures`].
        ///
@@ -70,7 +76,7 @@ impl fmt::Debug for APIError {
                        APIError::FeeRateTooHigh {ref err, ref feerate} => write!(f, "{} feerate: {}", err, feerate),
                        APIError::RouteError {ref err} => write!(f, "Route error: {}", err),
                        APIError::ChannelUnavailable {ref err} => write!(f, "Channel unavailable: {}", err),
-                       APIError::MonitorUpdateFailed => f.write_str("Client indicated a channel monitor update failed"),
+                       APIError::MonitorUpdateInProgress => f.write_str("Client indicated a channel monitor update is in progress but not yet complete"),
                        APIError::IncompatibleShutdownScript { ref script } => {
                                write!(f, "Provided a scriptpubkey format not accepted by peer: {}", script)
                        },
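
Because MonitorUpdateInProgress now signals a pending update rather than a failure, callers should treat it as success-in-flight instead of retrying or surfacing an error. A hedged sketch of a call-site handler (the function itself is hypothetical; the variants are those defined above):

	fn log_api_error(e: &APIError) {
		match e {
			APIError::MonitorUpdateInProgress => {
				// Not a failure: the monitor update is persisting asynchronously and the
				// attempted action completes automatically once it resolves.
			},
			APIError::ChannelUnavailable { err } => println!("channel unavailable: {}", err),
			other => println!("api error: {:?}", other),
		}
	}
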
index d04c3430bcc90de0ec7211c4ff12be4a8e2d6bd6..cfd8f5d89fc7157cd5308d7de9e56f48f1ac7b5e 100644 (file)
@@ -69,18 +69,22 @@ impl<'a, A: KVStorePersister, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Der
 impl<ChannelSigner: Sign, K: KVStorePersister> Persist<ChannelSigner> for K {
        // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
        // down once these start returning failure.
-       // A PermanentFailure implies we need to shut down since we're force-closing channels without
-       // even broadcasting!
+       // A PermanentFailure implies we should probably just shut down the node since we're
+       // force-closing channels without even broadcasting!
 
-       fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               self.persist(&key, monitor)
-                       .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
+               match self.persist(&key, monitor) {
+                       Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+                       Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
+               }
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               self.persist(&key, monitor)
-                       .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
+               match self.persist(&key, monitor) {
+                       Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+                       Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
+               }
        }
 }
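
The blanket KVStorePersister implementation above can only report Completed or PermanentFailure because KVStorePersister::persist is synchronous. For contrast, a persister doing asynchronous I/O would return InProgress and signal completion later; a sketch under that assumption (AsyncPersister and queue_write are hypothetical, not part of this diff):

	use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
	use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
	use lightning::chain::keysinterface::Sign;
	use lightning::chain::transaction::OutPoint;
	use lightning::chain::ChannelMonitorUpdateStatus;

	struct AsyncPersister;
	impl AsyncPersister {
		// Hypothetical: hand a serialized copy to a background writer which, once the
		// write is durable, calls ChainMonitor::channel_monitor_updated(funding_txo, update_id).
		fn queue_write<S: Sign>(&self, _funding_txo: OutPoint, _monitor: &ChannelMonitor<S>, _update_id: MonitorUpdateId) {}
	}

	impl<ChannelSigner: Sign> Persist<ChannelSigner> for AsyncPersister {
		fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus {
			self.queue_write(funding_txo, monitor, update_id);
			ChannelMonitorUpdateStatus::InProgress
		}
		fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus {
			self.queue_write(funding_txo, monitor, update_id);
			ChannelMonitorUpdateStatus::InProgress
		}
	}
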
index 4ce51fc8bedc252c9eb59f45aa2564db29b07b5c..9b2f222c519f9c672bf6d538ef23898de2e8020f 100644 (file)
@@ -126,7 +126,7 @@ impl<'a> TestChainMonitor<'a> {
        }
 }
 impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and to disk...
                let mut w = TestVecWriter(Vec::new());
@@ -140,7 +140,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                self.chain_monitor.watch_channel(funding_txo, new_monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
                // Every monitor update should survive a roundtrip
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
@@ -178,10 +178,10 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 }
 
 pub struct TestPersister {
-       pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+       pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
        /// If this is set to Some(), after the next return, we'll always return this until update_ret
        /// is changed:
-       pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
+       pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
        /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
        /// MonitorUpdateId here.
        pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
@@ -192,23 +192,23 @@ pub struct TestPersister {
 impl TestPersister {
        pub fn new() -> Self {
                Self {
-                       update_ret: Mutex::new(Ok(())),
+                       update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
                        next_update_ret: Mutex::new(None),
                        chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
                        offchain_monitor_updates: Mutex::new(HashMap::new()),
                }
        }
 
-       pub fn set_update_ret(&self, ret: Result<(), chain::ChannelMonitorUpdateErr>) {
+       pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
                *self.update_ret.lock().unwrap() = ret;
        }
 
-       pub fn set_next_update_ret(&self, next_ret: Option<Result<(), chain::ChannelMonitorUpdateErr>>) {
+       pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
                *self.next_update_ret.lock().unwrap() = next_ret;
        }
 }
 impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
                        *self.update_ret.lock().unwrap() = next_ret;
@@ -216,7 +216,7 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
                ret
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
                        *self.update_ret.lock().unwrap() = next_ret;