Rework `chain::Watch` return types to make async updates less scary
author     Matt Corallo <git@bluematt.me>
           Mon, 18 Jul 2022 01:32:27 +0000 (01:32 +0000)
committer  Matt Corallo <git@bluematt.me>
           Thu, 29 Sep 2022 20:27:53 +0000 (20:27 +0000)
When a `chain::Watch` `ChannelMonitor` update method is called, the
user has three options:
 (a) persist the monitor update immediately and return success,
 (b) fail to persist the monitor update immediately and return
     failure,
 (c) return a flag indicating the monitor update is in progress and
     will complete in the future.

(c) is rather harmless, and in some deployments should be expected
to be the return value for all monitor update calls, but currently
requires returning `Err(ChannelMonitorUpdateErr::TemporaryFailure)`,
which isn't very descriptive and sounds scarier than it is.

Instead, here, we change the return type to a single enum (rather
than a Result), renaming `ChannelMonitorUpdateErr` to
`ChannelMonitorUpdateStatus` and `TemporaryFailure` to `InProgress`.
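
For illustration (a sketch only, not code added by this commit: the
`AsyncPersister` type and its mpsc hand-off are hypothetical, layered
on the trait signatures introduced here), a persister which completes
updates in the background can now express option (c) as:

    use lightning::chain::ChannelMonitorUpdateStatus;
    use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
    use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
    use lightning::chain::keysinterface::Sign;
    use lightning::chain::transaction::OutPoint;
    use std::sync::mpsc::Sender;

    /// Hypothetical persister which queues writes for a background task.
    struct AsyncPersister {
        /// Sends (funding outpoint, update id) pairs to a background writer
        /// which reports completion back once all copies are durable.
        writer: Sender<(OutPoint, MonitorUpdateId)>,
    }

    impl<ChannelSigner: Sign> Persist<ChannelSigner> for AsyncPersister {
        fn persist_new_channel(&self, funding_txo: OutPoint,
            _monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
        ) -> ChannelMonitorUpdateStatus {
            // Queue the write (serialization elided) and tell LDK the persist
            // is still in flight. Previously this required the misleading
            // Err(ChannelMonitorUpdateErr::TemporaryFailure).
            self.writer.send((funding_txo, update_id)).expect("writer died");
            ChannelMonitorUpdateStatus::InProgress
        }

        fn update_persisted_channel(&self, funding_txo: OutPoint,
            _update: &Option<ChannelMonitorUpdate>,
            _monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId,
        ) -> ChannelMonitorUpdateStatus {
            self.writer.send((funding_txo, update_id)).expect("writer died");
            ChannelMonitorUpdateStatus::InProgress
        }
    }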

17 files changed:
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/utils/test_persister.rs
lightning-persister/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/util/errors.rs
lightning/src/util/persist.rs
lightning/src/util/test_utils.rs

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 062b5fb1b01179f7606276fc1e6d076311dc766d..7944d04ad1c33d1b4de0aed044f429df9f1c7860 100644
@@ -32,7 +32,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
@@ -124,7 +124,7 @@ impl TestChainMonitor {
        }
 }
 impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
                let mut ser = VecWriter(Vec::new());
                monitor.write(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
@@ -134,7 +134,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                self.chain_monitor.watch_channel(funding_txo, monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
                let mut map_lock = self.latest_monitors.lock().unwrap();
                let mut map_entry = match map_lock.entry(funding_txo) {
                        hash_map::Entry::Occupied(entry) => entry,
@@ -363,7 +363,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
                        let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
-                               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
+                               Arc::new(TestPersister {
+                                       update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+                               }), Arc::clone(&keys_manager)));
 
                        let mut config = UserConfig::default();
                        config.channel_config.forwarding_fee_proportional_millionths = 0;
@@ -383,7 +385,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                    let keys_manager = Arc::clone(& $keys_manager);
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
                        let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
-                               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
+                               Arc::new(TestPersister {
+                                       update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+                               }), Arc::clone(& $keys_manager)));
 
                        let mut config = UserConfig::default();
                        config.channel_config.forwarding_fee_proportional_millionths = 0;
@@ -412,7 +416,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                        let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
                        for (funding_txo, mon) in monitors.drain() {
-                               assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+                               assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
+                                       ChannelMonitorUpdateStatus::Completed);
                        }
                        res
                } }
@@ -889,12 +894,12 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        // bit-twiddling mutations to have similar effects. This is probably overkill, but no
                        // harm in doing so.
 
-                       0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
-                       0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
-                       0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
-                       0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
+                       0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+                       0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+                       0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+                       0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
 
                        0x08 => {
                                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
@@ -1119,9 +1124,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                // after we resolve all pending events.
                                // First make sure there are no pending monitor updates, resetting the error state
                                // and calling force_channel_monitor_updated for each monitor.
-                               *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
-                               *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
-                               *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
+                               *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+                               *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+                               *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
 
                                if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
                                        monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 9f1580288cac0dda4d67f7dc8a02522e4bd0627a..7edba55878f10d274e4232833516ac418690dcc5 100644
@@ -29,7 +29,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
 
 use lightning::chain;
-use lightning::chain::{BestBlock, Confirm, Listen};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::chainmonitor;
 use lightning::chain::transaction::OutPoint;
@@ -389,7 +389,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 
        let broadcast = Arc::new(TestBroadcaster{ txn_broadcasted: Mutex::new(Vec::new()) });
        let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone(),
-               Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) })));
+               Arc::new(TestPersister { update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) })));
 
        let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), inbound_payment_key: KeyMaterial(inbound_payment_key.try_into().unwrap()), counter: AtomicU64::new(0) });
        let mut config = UserConfig::default();
diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs
index 7ca1ff96d05ae5f0311a9702836800c01f012b65..44675fa787294b4a63144c9a2cddfacc4b9b33a6 100644
@@ -7,14 +7,14 @@ use lightning::util::enforcing_trait_impls::EnforcingSigner;
 use std::sync::Mutex;
 
 pub struct TestPersister {
-       pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+       pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
 impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 }
diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 03e42e43e42c2b425f2ba0d830c542952883e286..c36609351c7d4280cd4ac5a65265ad923ddc76de 100644
@@ -138,7 +138,7 @@ mod tests {
        use bitcoin::blockdata::block::{Block, BlockHeader};
        use bitcoin::hashes::hex::FromHex;
        use bitcoin::{Txid, TxMerkleNode};
-       use lightning::chain::ChannelMonitorUpdateErr;
+       use lightning::chain::ChannelMonitorUpdateStatus;
        use lightning::chain::chainmonitor::Persist;
        use lightning::chain::transaction::OutPoint;
        use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
@@ -270,7 +270,7 @@ mod tests {
                        index: 0
                };
                match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
                        _ => panic!("unexpected result from persisting new channel")
                }
 
@@ -307,7 +307,7 @@ mod tests {
                        index: 0
                };
                match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
                        _ => panic!("unexpected result from persisting new channel")
                }
 
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 9f15a2a27996ba1e40faf27a97f80c27fb434659..2479e8f7e7b5efd00582d2e9aaac7ecd9cfa9f31 100644
@@ -27,7 +27,7 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::hash_types::Txid;
 
 use chain;
-use chain::{ChannelMonitorUpdateErr, Filter, WatchedOutput};
+use chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::{OutPoint, TransactionData};
@@ -78,20 +78,21 @@ impl MonitorUpdateId {
 ///
 /// Each method can return three possible values:
 ///  * If persistence (including any relevant `fsync()` calls) happens immediately, the
-///    implementation should return `Ok(())`, indicating normal channel operation should continue.
+///    implementation should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal
+///    channel operation should continue.
 ///  * If persistence happens asynchronously, implementations should first ensure the
 ///    [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return
-///    `Err(ChannelMonitorUpdateErr::TemporaryFailure)` while the update continues in the
-///    background. Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be
-///    called with the corresponding [`MonitorUpdateId`].
+///    [`ChannelMonitorUpdateStatus::InProgress`] while the update continues in the background.
+///    Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be called with
+///    the corresponding [`MonitorUpdateId`].
 ///
 ///    Note that unlike the direct [`chain::Watch`] interface,
 ///    [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
 ///
 ///  * If persistence fails for some reason, implementations should return
-///    `Err(ChannelMonitorUpdateErr::PermanentFailure)`, in which case the channel will likely be
+///    [`ChannelMonitorUpdateStatus::PermanentFailure`], in which case the channel will likely be
 ///    closed without broadcasting the latest state. See
-///    [`ChannelMonitorUpdateErr::PermanentFailure`] for more details.
+///    [`ChannelMonitorUpdateStatus::PermanentFailure`] for more details.
 pub trait Persist<ChannelSigner: Sign> {
        /// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
        /// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
@@ -101,14 +102,14 @@ pub trait Persist<ChannelSigner: Sign> {
        /// and the stored channel data). Note that you **must** persist every new monitor to disk.
        ///
        /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
-       /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
+       /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
        ///
        /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
-       /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+       /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+       fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 
        /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
        /// update.
@@ -136,14 +137,14 @@ pub trait Persist<ChannelSigner: Sign> {
        /// whereas updates are small and `O(1)`.
        ///
        /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
-       /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
+       /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
        ///
        /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
        /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
-       /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+       /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+       fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 }
 
 struct MonitorHolder<ChannelSigner: Sign> {
@@ -151,9 +152,9 @@ struct MonitorHolder<ChannelSigner: Sign> {
        /// The full set of pending monitor updates for this Channel.
        ///
        /// Note that this lock must be held during updates to prevent a race where we call
-       /// update_persisted_channel, the user returns a TemporaryFailure, and then calls
-       /// channel_monitor_updated immediately, racing our insertion of the pending update into the
-       /// contained Vec.
+       /// update_persisted_channel, the user returns a
+       /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
+       /// immediately, racing our insertion of the pending update into the contained Vec.
        ///
        /// Beyond the synchronization of updates themselves, we cannot handle user events until after
        /// any chain updates have been stored on disk. Thus, we scan this list when returning updates
@@ -287,20 +288,20 @@ where C::Target: chain::Filter,
                                        if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
                                                // If there are not ChainSync persists awaiting completion, go ahead and
                                                // set last_chain_persist_height here - we wouldn't want the first
-                                               // TemporaryFailure to always immediately be considered "overly delayed".
+                                               // InProgress to always immediately be considered "overly delayed".
                                                monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
                                        }
                                }
 
                                log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
                                match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
-                                       Ok(()) =>
+                                       ChannelMonitorUpdateStatus::Completed =>
                                                log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
-                                       Err(ChannelMonitorUpdateErr::PermanentFailure) => {
+                                       ChannelMonitorUpdateStatus::PermanentFailure => {
                                                monitor_state.channel_perm_failed.store(true, Ordering::Release);
                                                self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
                                        },
-                                       Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+                                       ChannelMonitorUpdateStatus::InProgress => {
                                                log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
                                                pending_monitor_updates.push(update_id);
                                        },
@@ -400,12 +401,12 @@ where C::Target: chain::Filter,
        }
 
        /// Indicates the persistence of a [`ChannelMonitor`] has completed after
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation.
+       /// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
        ///
        /// Thus, the anticipated use is, at a high level:
        ///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
        ///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
-       ///     returning [`ChannelMonitorUpdateErr::TemporaryFailure`],
+       ///     returning [`ChannelMonitorUpdateStatus::InProgress`],
        ///  2) once all remote copies are updated, you call this function with the
        ///     `completed_update_id` that completed, and once all pending updates have completed the
        ///     channel will be re-enabled.
@@ -438,10 +439,10 @@ where C::Target: chain::Filter,
                                if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) {
                                        // If there are still monitor updates pending (or an old monitor update
                                        // finished after a later one perm-failed), we cannot yet construct an
-                                       // UpdateCompleted event.
+                                       // Completed event.
                                        return Ok(());
                                }
-                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
+                               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
                                        funding_txo,
                                        monitor_update_id: monitor_data.monitor.get_latest_update_id(),
                                }], monitor_data.monitor.get_counterparty_node_id()));
@@ -464,7 +465,7 @@ where C::Target: chain::Filter,
        pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
                let monitors = self.monitors.read().unwrap();
                let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
-               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
+               self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
                        funding_txo,
                        monitor_update_id,
                }], counterparty_node_id));
@@ -570,27 +571,31 @@ where C::Target: chain::Filter,
        ///
        /// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
        /// monitors lock.
-       fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
                let mut monitors = self.monitors.write().unwrap();
                let entry = match monitors.entry(funding_outpoint) {
                        hash_map::Entry::Occupied(_) => {
                                log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
-                               return Err(ChannelMonitorUpdateErr::PermanentFailure)},
+                               return ChannelMonitorUpdateStatus::PermanentFailure
+                       },
                        hash_map::Entry::Vacant(e) => e,
                };
                log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
                let update_id = MonitorUpdateId::from_new_monitor(&monitor);
                let mut pending_monitor_updates = Vec::new();
                let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
-               if persist_res.is_err() {
-                       log_error!(self.logger, "Failed to persist new ChannelMonitor for channel {}: {:?}", log_funding_info!(monitor), persist_res);
-               } else {
-                       log_trace!(self.logger, "Finished persisting new ChannelMonitor for channel {}", log_funding_info!(monitor));
-               }
-               if persist_res == Err(ChannelMonitorUpdateErr::PermanentFailure) {
-                       return persist_res;
-               } else if persist_res.is_err() {
-                       pending_monitor_updates.push(update_id);
+               match persist_res {
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+                               pending_monitor_updates.push(update_id);
+                       },
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               log_error!(self.logger, "Persistence of new ChannelMonitor for channel {} failed", log_funding_info!(monitor));
+                               return persist_res;
+                       },
+                       ChannelMonitorUpdateStatus::Completed => {
+                               log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+                       }
                }
                if let Some(ref chain_source) = self.chain_source {
                        monitor.load_outputs_to_watch(chain_source);
@@ -606,7 +611,7 @@ where C::Target: chain::Filter,
 
        /// Note that we persist the given `ChannelMonitor` update while holding the
        /// `ChainMonitor` monitors lock.
-       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
                // Update the monitor that watches the channel referred to by the given outpoint.
                let monitors = self.monitors.read().unwrap();
                match monitors.get(&funding_txo) {
@@ -619,7 +624,7 @@ where C::Target: chain::Filter,
                                #[cfg(any(test, fuzzing))]
                                panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
                                #[cfg(not(any(test, fuzzing)))]
-                               Err(ChannelMonitorUpdateErr::PermanentFailure)
+                               ChannelMonitorUpdateStatus::PermanentFailure
                        },
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
@@ -633,20 +638,23 @@ where C::Target: chain::Filter,
                                let update_id = MonitorUpdateId::from_monitor_update(&update);
                                let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
                                let persist_res = self.persister.update_persisted_channel(funding_txo, &Some(update), monitor, update_id);
-                               if let Err(e) = persist_res {
-                                       if e == ChannelMonitorUpdateErr::TemporaryFailure {
+                               match persist_res {
+                                       ChannelMonitorUpdateStatus::InProgress => {
                                                pending_monitor_updates.push(update_id);
-                                       } else {
+                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+                                       },
+                                       ChannelMonitorUpdateStatus::PermanentFailure => {
                                                monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                       }
-                                       log_error!(self.logger, "Failed to persist ChannelMonitor update for channel {}: {:?}", log_funding_info!(monitor), e);
-                               } else {
-                                       log_trace!(self.logger, "Finished persisting ChannelMonitor update for channel {}", log_funding_info!(monitor));
+                                               log_error!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} failed", log_funding_info!(monitor));
+                                       },
+                                       ChannelMonitorUpdateStatus::Completed => {
+                                               log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+                                       },
                                }
                                if update_res.is_err() {
-                                       Err(ChannelMonitorUpdateErr::PermanentFailure)
+                                       ChannelMonitorUpdateStatus::PermanentFailure
                                } else if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
-                                       Err(ChannelMonitorUpdateErr::PermanentFailure)
+                                       ChannelMonitorUpdateStatus::PermanentFailure
                                } else {
                                        persist_res
                                }
@@ -723,7 +731,7 @@ mod tests {
        use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
        use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
        use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
-       use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
+       use chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
        use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
        use ln::channelmanager::{self, PaymentSendFailure};
        use ln::functional_test_utils::*;
@@ -747,7 +755,7 @@ mod tests {
                let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
                chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
-               chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+               chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
                nodes[1].node.claim_funds(payment_preimage_1);
                check_added_monitors!(nodes[1], 1);
@@ -756,7 +764,7 @@ mod tests {
                check_added_monitors!(nodes[1], 1);
                expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
-               chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+               chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
                let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
                assert_eq!(persistences.len(), 1);
@@ -834,7 +842,7 @@ mod tests {
 
                // Temp-fail the block connection which will hold the channel-closed event
                chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
                // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
                // channel is now closed, but the ChannelManager doesn't know that yet.
@@ -850,7 +858,7 @@ mod tests {
 
                // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
                // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
-               chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
                unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)),
                        true, APIError::ChannelUnavailable { ref err },
                        assert!(err.contains("ChannelMonitor storage failure")));
@@ -896,7 +904,7 @@ mod tests {
                create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
                chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
                connect_blocks(&nodes[0], 1);
                // Before processing events, the ChannelManager will still think the Channel is open and
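
(A sketch only, not part of this commit.) The completion side of the
asynchronous flow documented above for `Persist` implementations can
be pictured roughly as follows; the writer loop and the `complete`
callback are assumptions, where `complete` stands in for calling
`ChainMonitor::channel_monitor_updated` with the funding outpoint and
`MonitorUpdateId` that the persister was handed:

    use lightning::chain::chainmonitor::MonitorUpdateId;
    use lightning::chain::transaction::OutPoint;
    use std::sync::mpsc::Receiver;

    /// Hypothetical background writer: finish each queued write, then report
    /// it completed so the channel can resume normal operation.
    fn run_writer<F: Fn(OutPoint, MonitorUpdateId)>(
        queue: Receiver<(OutPoint, MonitorUpdateId)>, complete: F,
    ) {
        while let Ok((funding_txo, update_id)) = queue.recv() {
            // ... make the monitor/update durable on all copies here ...
            // Once every pending update for a channel has been reported
            // complete, the ChainMonitor queues a MonitorEvent::Completed and
            // the channel is unfrozen.
            complete(funding_txo, update_id);
        }
    }
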
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 69cdf824a61db429725ba7808aa7820712fbb68d..8f8cbdf448adb80d7fa50ee279c86862c1d0abd6 100644
@@ -76,14 +76,14 @@ pub struct ChannelMonitorUpdate {
        /// increasing and increase by one for each new update, with one exception specified below.
        ///
        /// This sequence number is also used to track up to which points updates which returned
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`] have been applied to all copies of a given
+       /// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
        /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
        ///
        /// The only instance where update_id values are not strictly increasing is the case where we
        /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
        /// its docs for more details.
        ///
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
        pub update_id: u64,
 }
 
@@ -134,10 +134,10 @@ pub enum MonitorEvent {
        CommitmentTxConfirmed(OutPoint),
 
        /// Indicates a [`ChannelMonitor`] update has completed. See
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`] for more information on how this is used.
+       /// [`ChannelMonitorUpdateStatus::InProgress`] for more information on how this is used.
        ///
-       /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
-       UpdateCompleted {
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
+       Completed {
                /// The funding outpoint of the [`ChannelMonitor`] that was updated
                funding_txo: OutPoint,
                /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
@@ -149,15 +149,15 @@ pub enum MonitorEvent {
        },
 
        /// Indicates a [`ChannelMonitor`] update has failed. See
-       /// [`ChannelMonitorUpdateErr::PermanentFailure`] for more information on how this is used.
+       /// [`ChannelMonitorUpdateStatus::PermanentFailure`] for more information on how this is used.
        ///
-       /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+       /// [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
        UpdateFailed(OutPoint),
 }
 impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
-       // Note that UpdateCompleted and UpdateFailed are currently never serialized to disk as they are
+       // Note that Completed and UpdateFailed are currently never serialized to disk as they are
        // generated only in ChainMonitor
-       (0, UpdateCompleted) => {
+       (0, Completed) => {
                (0, funding_txo, required),
                (2, monitor_update_id, required),
        },
@@ -1319,8 +1319,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
        /// the Channel was out-of-date.
        ///
        /// You may also use this to broadcast the latest local commitment transaction, either because
-       /// a monitor update failed with [`ChannelMonitorUpdateErr::PermanentFailure`] or because we've
-       /// fallen behind (i.e we've received proof that our counterparty side knows a revocation
+       /// a monitor update failed with [`ChannelMonitorUpdateStatus::PermanentFailure`] or because we've
+       /// fallen behind (i.e. we've received proof that our counterparty side knows a revocation
        /// secret we gave them that they shouldn't know).
        ///
        /// Broadcasting these transactions in the second case is UNSAFE, as they allow counterparty
@@ -1329,7 +1329,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
        /// may be to contact the other node operator out-of-band to coordinate other options available
        /// to you. In any-case, the choice is up to you.
        ///
-       /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+       /// [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
        pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
        where L::Target: Logger {
                self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index 66491d7282e4556f8121c88b07c493a54d9e0941..c54f9b1d7eba6814cb831bfd3c91c112fcc096cf 100644
@@ -187,16 +187,22 @@ pub trait Confirm {
        fn get_relevant_txids(&self) -> Vec<Txid>;
 }
 
-/// An error enum representing a failure to persist a channel monitor update.
+/// An enum representing the status of a channel monitor update persistence.
 #[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ChannelMonitorUpdateErr {
+pub enum ChannelMonitorUpdateStatus {
+       /// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
+       /// have been updated.
+       ///
+       /// This includes performing any `fsync()` calls required to ensure the update is guaranteed to
+       /// be available on restart even if the application crashes.
+       Completed,
        /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
        /// our state failed, but is expected to succeed at some point in the future).
        ///
        /// Such a failure will "freeze" a channel, preventing us from revoking old states or
        /// submitting new commitment transactions to the counterparty. Once the update(s) which failed
-       /// have been successfully applied, a [`MonitorEvent::UpdateCompleted`] can be used to restore
-       /// the channel to an operational state.
+       /// have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
+       /// channel to an operational state.
        ///
        /// Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
        /// If you return this error you must ensure that it is written to disk safely before writing
@@ -207,16 +213,17 @@ pub enum ChannelMonitorUpdateErr {
        /// attempting to claim it on this channel) and those updates must still be persisted.
        ///
        /// No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
-       /// until a [`MonitorEvent::UpdateCompleted`] is provided, even if you return no error on a
-       /// later monitor update for the same channel.
+       /// until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
+       /// monitor update for the same channel.
        ///
        /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
        /// remote location (with local copies persisted immediately), it is anticipated that all
-       /// updates will return TemporaryFailure until the remote copies could be updated.
+       /// updates will return [`InProgress`] until the remote copies could be updated.
        ///
-       /// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+       /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+       /// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-       TemporaryFailure,
+       InProgress,
        /// Used to indicate no further channel monitor updates will be allowed (likely a disk failure
        /// or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
        ///
@@ -253,7 +260,7 @@ pub enum ChannelMonitorUpdateErr {
        /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
        /// lagging behind on block processing.
        ///
-       /// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+       /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        PermanentFailure,
 }
@@ -273,10 +280,10 @@ pub enum ChannelMonitorUpdateErr {
 /// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
 /// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
 /// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
-/// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle
+/// funds in the channel. See [`ChannelMonitorUpdateStatus`] for more details about how to handle
 /// multiple instances.
 ///
-/// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
 pub trait Watch<ChannelSigner: Sign> {
        /// Watches a channel identified by `funding_txo` using `monitor`.
        ///
@@ -284,21 +291,21 @@ pub trait Watch<ChannelSigner: Sign> {
        /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
        /// calling [`block_connected`] and [`block_disconnected`] on the monitor.
        ///
-       /// Note: this interface MUST error with [`ChannelMonitorUpdateErr::PermanentFailure`] if
+       /// Note: this interface MUST error with [`ChannelMonitorUpdateStatus::PermanentFailure`] if
        /// the given `funding_txo` has previously been registered via `watch_channel`.
        ///
        /// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
        /// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
        /// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
 
        /// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
        ///
        /// Implementations must call [`update_monitor`] with the given update. See
-       /// [`ChannelMonitorUpdateErr`] for invariants around returning an error.
+       /// [`ChannelMonitorUpdateStatus`] for invariants around returning an error.
        ///
        /// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor
-       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
+       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
 
        /// Returns any monitor events since the last call. Subsequent calls must only return new
        /// events.
@@ -308,7 +315,7 @@ pub trait Watch<ChannelSigner: Sign> {
        /// to disk.
        ///
        /// For details on asynchronous [`ChannelMonitor`] updating and returning
-       /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
+       /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
        fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
 }
 
@@ -327,9 +334,9 @@ pub trait Watch<ChannelSigner: Sign> {
 /// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
 /// should not block on I/O. Implementations should instead queue the newly monitored data to be
 /// processed later. Then, in order to block until the data has been processed, any [`Watch`]
-/// invocation that has called the `Filter` must return [`TemporaryFailure`].
+/// invocation that has called the `Filter` must return [`InProgress`].
 ///
-/// [`TemporaryFailure`]: ChannelMonitorUpdateErr::TemporaryFailure
+/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
 /// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
 /// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
 pub trait Filter {
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index aca5503521e75c7b16cb3444d545226d09be057a..c0648eb2b8c3f8b5cafff2ce29a849e0d6c5b836 100644
@@ -7,7 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
+//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
 //! monitor updates.
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
@@ -18,7 +18,7 @@ use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use chain::transaction::OutPoint;
-use chain::{ChannelMonitorUpdateErr, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
 use ln::channel::AnnouncementSigsState;
 use ln::msgs;
@@ -50,7 +50,7 @@ fn test_simple_monitor_permanent_update_fail() {
        create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
        let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
        unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
        check_added_monitors!(nodes[0], 2);
 
@@ -116,7 +116,7 @@ fn test_monitor_and_persister_update_fail() {
                        &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                chain_mon
        };
        let header = BlockHeader {
@@ -129,8 +129,8 @@ fn test_monitor_and_persister_update_fail() {
        };
        chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
 
-       // Set the persister's return value to be a TemporaryFailure.
-       persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       // Set the persister's return value to be an InProgress.
+       persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        // Try to update ChannelMonitor
        nodes[1].node.claim_funds(preimage);
@@ -142,12 +142,12 @@ fn test_monitor_and_persister_update_fail() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
                if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Check that even though the persister is returning a TemporaryFailure,
+                       // Check that even though the persister is returning an InProgress,
                        // because the update is bogus, ultimately the error that's returned
                        // should be a PermanentFailure.
-                       if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
-                       logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1);
-                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
+                       logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        } else { assert!(false); };
 
@@ -167,7 +167,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
        let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        {
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
@@ -184,7 +184,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
                reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -220,7 +220,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Now set it to failed again...
        let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
        {
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
                check_added_monitors!(nodes[0], 1);
        }
@@ -261,8 +261,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // * First we route a payment, then get a temporary monitor update failure when trying to
        //   route a second payment. We then claim the first payment.
        // * If disconnect_count is set, we will disconnect at this point (which is likely as
-       //   TemporaryFailure likely indicates net disconnect which resulted in failing to update
-       //   the ChannelMonitor on a watchtower).
+       //   InProgress likely indicates net disconnect which resulted in failing to update the
+       //   ChannelMonitor on a watchtower).
        // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
        //   immediately, otherwise we wait disconnect and deliver them via the reconnect
        //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
@@ -284,7 +284,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        {
-               chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+               chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
                check_added_monitors!(nodes[0], 1);
        }
@@ -339,7 +339,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        }
 
        // Now fix monitor updating...
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -631,14 +631,14 @@ fn test_monitor_update_fail_cs() {
        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -662,7 +662,7 @@ fn test_monitor_update_fail_cs() {
                        assert!(updates.update_fee.is_none());
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
-                       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+                       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -672,7 +672,7 @@ fn test_monitor_update_fail_cs() {
                _ => panic!("Unexpected event"),
        }
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -724,7 +724,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -732,7 +732,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -781,7 +781,7 @@ fn test_monitor_update_raa_while_paused() {
        check_added_monitors!(nodes[1], 1);
        let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -790,10 +790,10 @@ fn test_monitor_update_raa_while_paused() {
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
+       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -873,7 +873,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now fail monitor updating.
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -889,7 +889,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                check_added_monitors!(nodes[0], 1);
        }
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
        send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
@@ -919,7 +919,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Restore monitor updating, ensuring we immediately get a fail-back update and a
        // update_add update.
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1123,7 +1123,7 @@ fn test_monitor_update_fail_reestablish() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
 
@@ -1160,7 +1160,7 @@ fn test_monitor_update_fail_reestablish() {
                get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
                        .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1225,7 +1225,7 @@ fn raa_no_response_awaiting_raa_state() {
        // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
        // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
        // then restore channel monitor updates.
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1234,10 +1234,10 @@ fn raa_no_response_awaiting_raa_state() {
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
+       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        // nodes[1] should be AwaitingRAA here!
@@ -1328,7 +1328,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
        // update.
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
@@ -1355,7 +1355,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now un-fail the monitor, which will result in B sending its original commitment update,
        // receiving the commitment update from A, and the resulting commitment dances.
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1434,7 +1434,7 @@ fn monitor_failed_no_reestablish_response() {
                check_added_monitors!(nodes[0], 1);
        }
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -1460,7 +1460,7 @@ fn monitor_failed_no_reestablish_response() {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
        let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1534,7 +1534,7 @@ fn first_message_on_recv_ordering() {
        let payment_event = SendEvent::from_event(events.pop().unwrap());
        assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        // Deliver the final RAA for the first payment, which does not require a response. RAAs
        // generally require a commitment_signed, so the fact that we're expecting an opposite response
@@ -1553,7 +1553,7 @@ fn first_message_on_recv_ordering() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1597,7 +1597,7 @@ fn test_monitor_update_fail_claim() {
 
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
@@ -1616,7 +1616,7 @@ fn test_monitor_update_fail_claim() {
 
        // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
        // paused, so forward shouldn't succeed until we call channel_monitor_updated().
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let mut events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -1727,13 +1727,13 @@ fn test_monitor_update_on_pending_forwards() {
        nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1787,7 +1787,7 @@ fn monitor_update_claim_fail_no_response() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
@@ -1796,7 +1796,7 @@ fn monitor_update_claim_fail_no_response() {
        assert_eq!(events.len(), 0);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1834,19 +1834,19 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
        let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
@@ -1886,7 +1886,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        }
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
@@ -1958,8 +1958,8 @@ fn test_path_paused_mpp() {
 
        // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
        // (for the path 0 -> 2 -> 3) fails.
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-       chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress));
 
        // Now check that we get the right return value, indicating that the first path succeeded but
        // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
@@ -1970,7 +1970,7 @@ fn test_path_paused_mpp() {
                if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
        } else { panic!(); }
        check_added_monitors!(nodes[0], 2);
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        // Pass the first HTLC of the payment along to nodes[3].
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -2267,7 +2267,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
        check_added_monitors!(nodes[0], 0);
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.claim_funds(payment_preimage_0);
        check_added_monitors!(nodes[0], 1);
        expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
@@ -2317,7 +2317,8 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                        nodes[0].node = &nodes_0_deserialized;
                        assert!(nodes_0_read.is_empty());
 
-                       nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+                       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
+                               ChannelMonitorUpdateStatus::Completed);
                        check_added_monitors!(nodes[0], 1);
                } else {
                        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
@@ -2361,7 +2362,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 
        // If we finish updating the monitor, we should free the holding cell right away (this did
        // not occur prior to #756).
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
 
@@ -2560,8 +2561,8 @@ fn test_temporary_error_during_shutdown() {
 
        let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
@@ -2572,8 +2573,8 @@ fn test_temporary_error_during_shutdown() {
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
@@ -2581,7 +2582,7 @@ fn test_temporary_error_during_shutdown() {
 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
 
@@ -2614,7 +2615,7 @@ fn test_permanent_error_during_sending_shutdown() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        check_closed_broadcast!(nodes[0], true);
@@ -2635,7 +2636,7 @@ fn test_permanent_error_during_handling_shutdown() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -2658,20 +2659,20 @@ fn double_temp_error() {
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
        let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // `claim_funds` results in a ChannelMonitorUpdate.
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
        // which had some asserts that prevented it from being called twice.
        nodes[1].node.claim_funds(payment_preimage_2);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
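The test and macro changes above all reduce to one calling convention: `update_channel`/`watch_channel` now hand back a `ChannelMonitorUpdateStatus` to branch on instead of a `Result` to unwrap. Below is a minimal, standalone Rust sketch of that convention; it defines a local stand-in for the enum rather than pulling in the `lightning` crate, and `handle_update_status` is an illustrative helper, not an API introduced by this patch.

// Standalone sketch: a local stand-in for chain::ChannelMonitorUpdateStatus and a
// caller that branches on the tri-state status rather than unwrapping a Result.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ChannelMonitorUpdateStatus {
	/// The update was persisted synchronously; normal operation continues.
	Completed,
	/// Persistence is still running asynchronously; the channel stays paused until
	/// completion is signalled (the tests above do this via force_channel_monitor_updated).
	InProgress,
	/// Persistence failed for good; the channel must be force-closed.
	PermanentFailure,
}

/// Illustrative helper mirroring the shape of the new match arms in handle_monitor_err!.
fn handle_update_status(status: ChannelMonitorUpdateStatus) -> Result<(), &'static str> {
	match status {
		ChannelMonitorUpdateStatus::Completed => Ok(()),
		ChannelMonitorUpdateStatus::InProgress => Err("monitor update pending; channel paused"),
		ChannelMonitorUpdateStatus::PermanentFailure => Err("monitor update failed; closing channel"),
	}
}

fn main() {
	assert!(handle_update_status(ChannelMonitorUpdateStatus::Completed).is_ok());
	assert!(handle_update_status(ChannelMonitorUpdateStatus::InProgress).is_err());
}

The real handling in channelmanager.rs additionally resends messages or force-closes the channel, as the hunks below show.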
index 8872a905ef347d00ca0a84b76267a995b1c571ff..fb73951a61495ccbe979f02945460e46023cce37 100644 (file)
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -35,7 +35,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::{LockTime, secp256k1, Sequence};
 
 use chain;
-use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
+use chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
@@ -1171,7 +1171,7 @@ pub enum PaymentSendFailure {
        ///
        /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
        /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
-       /// case of Ok(())) or will send once a [`MonitorEvent::UpdateCompleted`] is provided for the
+       /// case of Ok(())) or will send once a [`MonitorEvent::Completed`] is provided for the
        /// next-hop channel with the latest update_id.
        PartialFailure {
                /// The errors themselves, in the same order as the route hops.
@@ -1332,8 +1332,8 @@ macro_rules! remove_channel {
 macro_rules! handle_monitor_err {
        ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
-                       ChannelMonitorUpdateErr::PermanentFailure => {
-                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
                                update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
                                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
                                // chain in a confused state! We need to move them into the ChannelMonitor which
@@ -1348,8 +1348,8 @@ macro_rules! handle_monitor_err {
                                                $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
-                       ChannelMonitorUpdateErr::TemporaryFailure => {
-                               log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
                                                log_bytes!($chan_id[..]),
                                                if $resend_commitment && $resend_raa {
                                                                match $action_type {
@@ -1371,6 +1371,9 @@ macro_rules! handle_monitor_err {
                                $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
+                       ChannelMonitorUpdateStatus::Completed => {
+                               (Ok(()), false)
+                       },
                }
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
@@ -1398,27 +1401,6 @@ macro_rules! handle_monitor_err {
        };
 }
 
-macro_rules! return_monitor_err {
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
-       };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-       }
-}
-
-// Does not break in case of TemporaryFailure!
-macro_rules! maybe_break_monitor_err {
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
-                       (e, ChannelMonitorUpdateErr::PermanentFailure) => {
-                               break e;
-                       },
-                       (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
-               }
-       }
-}
-
 macro_rules! send_channel_ready {
        ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
                $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
@@ -1493,15 +1475,18 @@ macro_rules! handle_chan_restoration_locked {
                                // only case where we can get a new ChannelMonitorUpdate would be if we also
                                // have some commitment updates to send as well.
                                assert!($commitment_update.is_some());
-                               if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                       // channel_reestablish doesn't guarantee the order it returns is sensical
-                                       // for the messages it returns, but if we're setting what messages to
-                                       // re-transmit on monitor update success, we need to make sure it is sane.
-                                       let mut order = $order;
-                                       if $raa.is_none() {
-                                               order = RAACommitmentOrder::CommitmentFirst;
+                               match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                       ChannelMonitorUpdateStatus::Completed => {},
+                                       e => {
+                                               // channel_reestablish doesn't guarantee the order it returns is sensical
+                                               // for the messages it returns, but if we're setting what messages to
+                                               // re-transmit on monitor update success, we need to make sure it is sane.
+                                               let mut order = $order;
+                                               if $raa.is_none() {
+                                                       order = RAACommitmentOrder::CommitmentFirst;
+                                               }
+                                               break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
                                        }
-                                       break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
                                }
                        }
 
@@ -1852,13 +1837,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, channel_state, chan_entry);
-                                                               break result;
-                                                       }
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_err!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, channel_state, chan_entry);
+                                                       break result;
                                                }
                                        }
 
@@ -2474,19 +2458,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        channel_state, chan)
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
-                                                       // Note that MonitorUpdateFailed here indicates (per function docs)
-                                                       // that we will resend the commitment update once monitor updating
-                                                       // is restored. Therefore, we must return an error indicating that
-                                                       // it is unsafe to retry the payment wholesale, which we do in the
-                                                       // send_payment check for MonitorUpdateFailed, below.
-                                                       insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
-                                                       return Err(APIError::MonitorUpdateFailed);
+                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                               let chan_id = chan.get().channel_id();
+                                               match (update_err,
+                                                       handle_monitor_err!(self, update_err, channel_state, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false, true))
+                                               {
+                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {
+                                                               insert_outbound_payment!();
+                                                       },
+                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+                                                               // Note that MonitorUpdateFailed here indicates (per function docs)
+                                                               // that we will resend the commitment update once monitor updating
+                                                               // is restored. Therefore, we must return an error indicating that
+                                                               // it is unsafe to retry the payment wholesale, which we do in the
+                                                               // send_payment check for MonitorUpdateFailed, below.
+                                                               insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
+                                                               return Err(APIError::MonitorUpdateFailed);
+                                                       },
+                                                       _ => unreachable!(),
                                                }
-                                               insert_outbound_payment!();
 
-                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
+                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: path.first().unwrap().pubkey,
                                                        updates: msgs::CommitmentUpdate {
@@ -3203,9 +3197,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        continue;
                                                                }
                                                        };
-                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                               handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
-                                                               continue;
+                                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {},
+                                                               e => {
+                                                                       handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+                                                                       continue;
+                                                               }
                                                        }
                                                        log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
                                                                add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
@@ -3474,23 +3471,26 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                let ret_err = match res {
                        Ok(Some((update_fee, commitment_signed, monitor_update))) => {
-                               if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                       let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
-                                       if drop { retain_channel = false; }
-                                       res
-                               } else {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: chan.get_counterparty_node_id(),
-                                               updates: msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs: Vec::new(),
-                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                       update_fee: Some(update_fee),
-                                                       commitment_signed,
-                                               },
-                                       });
-                                       Ok(())
+                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                       ChannelMonitorUpdateStatus::Completed => {
+                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id: chan.get_counterparty_node_id(),
+                                                       updates: msgs::CommitmentUpdate {
+                                                               update_add_htlcs: Vec::new(),
+                                                               update_fulfill_htlcs: Vec::new(),
+                                                               update_fail_htlcs: Vec::new(),
+                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                               update_fee: Some(update_fee),
+                                                               commitment_signed,
+                                                       },
+                                               });
+                                               Ok(())
+                                       },
+                                       e => {
+                                               let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+                                               if drop { retain_channel = false; }
+                                               res
+                                       }
                                }
                        },
                        Ok(None) => Ok(()),
@@ -4074,15 +4074,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
                                        if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
-                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
-                                                               payment_preimage, e);
-                                                       return ClaimFundsFromHop::MonitorUpdateFail(
-                                                               chan.get().get_counterparty_node_id(),
-                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-                                                               Some(htlc_value_msat)
-                                                       );
+                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       ChannelMonitorUpdateStatus::Completed => {},
+                                                       e => {
+                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+                                                                       "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                                       payment_preimage, e);
+                                                               return ClaimFundsFromHop::MonitorUpdateFail(
+                                                                       chan.get().get_counterparty_node_id(),
+                                                                       handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                                       Some(htlc_value_msat)
+                                                               );
+                                                       }
                                                }
                                                if let Some((msg, commitment_signed)) = msgs {
                                                        log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -4105,10 +4108,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                },
                                Err((e, monitor_update)) => {
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
-                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                                       payment_preimage, e);
-                                       }
+                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+                                                               "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                               payment_preimage, e);
+                                               },
                                        }
                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
                                        let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
@@ -4215,9 +4221,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // We update the ChannelMonitor on the backward link, after
                                        // receiving an offchain preimage event from the forward link (the
                                        // event being update_fulfill_htlc).
-                                       if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+                                       let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
                                                log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                                                        payment_preimage, e);
+                                                                                        payment_preimage, update_res);
                                        }
                                        // Note that we do *not* set `claimed_htlc` to false here. In fact, this
                                        // totally could be a duplicate claim, but we have no way of knowing
@@ -4482,29 +4489,28 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
                // lock before watch_channel
-               if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
-                       match e {
-                               ChannelMonitorUpdateErr::PermanentFailure => {
-                                       // Note that we reply with the new channel_id in error messages if we gave up on the
-                                       // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                                       // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                                       // any messages referencing a previously-closed channel anyway.
-                                       // We do not do a force-close here as that would generate a monitor update for
-                                       // a monitor that we didn't manage to store (and that we don't care about - we
-                                       // don't respond with the funding_signed so the channel can never go on chain).
-                                       let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
-                                       assert!(failed_htlcs.is_empty());
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
-                               },
-                               ChannelMonitorUpdateErr::TemporaryFailure => {
-                                       // There's no problem signing a counterparty's funding transaction if our monitor
-                                       // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                                       // accepted payment from yet. We do, however, need to wait to send our channel_ready
-                                       // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
-                                       channel_ready = None; // Don't send the channel_ready now
-                               },
-                       }
+               match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
+                       ChannelMonitorUpdateStatus::Completed => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               // Note that we reply with the new channel_id in error messages if we gave up on the
+                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                               // any messages referencing a previously-closed channel anyway.
+                               // We do not propagate the monitor update to the user as it would be for a monitor
+                               // that we didn't manage to store (and that we don't care about - we don't respond
+                               // with the funding_signed so the channel can never go on chain).
+                               let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
+                               assert!(failed_htlcs.is_empty());
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
+                       },
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               // There's no problem signing a counterparty's funding transaction if our monitor
+                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
+                               // until we have persisted our monitor.
+                               chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+                               channel_ready = None; // Don't send the channel_ready now
+                       },
                }
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -4551,17 +4557,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
-                                       if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                       // monitor update contained within `shutdown_finish` was applied.
-                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                               shutdown_finish.0.take();
+                                       match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                                       if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                               // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                               // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                               // monitor update contained within `shutdown_finish` was applied.
+                                                               if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                                       shutdown_finish.0.take();
+                                                               }
                                                        }
-                                               }
-                                               return res
+                                                       return res
+                                               },
                                        }
                                        if let Some(msg) = channel_ready {
                                                send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
@@ -4635,13 +4644,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, channel_state, chan_entry);
-                                                               break result;
-                                                       }
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_err!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, channel_state, chan_entry);
+                                                       break result;
                                                }
                                        }
 
@@ -4830,9 +4838,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                },
                                                Ok(res) => res
                                        };
-                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                       return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
+                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                               if let Err(e) = handle_monitor_err!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+                                       return Err(e);
                                }
+
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                                        node_id: counterparty_node_id.clone(),
                                        msg: revoke_and_ack,
@@ -4904,26 +4914,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
                                        let raa_updates = break_chan_entry!(self,
                                                chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
                                        htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
-                                               if was_frozen_for_monitor {
-                                                       assert!(raa_updates.commitment_update.is_none());
-                                                       assert!(raa_updates.accepted_htlcs.is_empty());
-                                                       assert!(raa_updates.failed_htlcs.is_empty());
-                                                       assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                                       break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
-                                               } else {
-                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
-                                                                       RAACommitmentOrder::CommitmentFirst, false,
-                                                                       raa_updates.commitment_update.is_some(), false,
-                                                                       raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                                       raa_updates.finalized_claimed_htlcs) {
-                                                               break Err(e);
-                                                       } else { unreachable!(); }
-                                               }
+                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
+                                       if was_paused_for_mon_update {
+                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+                                               assert!(raa_updates.commitment_update.is_none());
+                                               assert!(raa_updates.accepted_htlcs.is_empty());
+                                               assert!(raa_updates.failed_htlcs.is_empty());
+                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+                                       }
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                                               if let Err(e) = handle_monitor_err!(self, update_res, channel_state, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false,
+                                                               raa_updates.commitment_update.is_some(), false,
+                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                               raa_updates.finalized_claimed_htlcs) {
+                                                       break Err(e);
+                                               } else { unreachable!(); }
                                        }
                                        if let Some(updates) = raa_updates.commitment_update {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -5136,7 +5147,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        });
                                                }
                                        },
-                                       MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+                                       MonitorEvent::Completed { funding_txo, monitor_update_id } => {
                                                self.channel_monitor_updated(&funding_txo, monitor_update_id);
                                        },
                                }
@@ -5188,16 +5199,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        ));
                                                }
                                                if let Some((commitment_update, monitor_update)) = commitment_opt {
-                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                                               has_monitor_update = true;
-                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
-                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
-                                                               if close_channel { return false; }
-                                                       } else {
-                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                       node_id: chan.get_counterparty_node_id(),
-                                                                       updates: commitment_update,
-                                                               });
+                                                       match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {
+                                                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                               node_id: chan.get_counterparty_node_id(),
+                                                                               updates: commitment_update,
+                                                                       });
+                                                               },
+                                                               e => {
+                                                                       has_monitor_update = true;
+                                                                       let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+                                                                       handle_errors.push((chan.get_counterparty_node_id(), res));
+                                                                       if close_channel { return false; }
+                                                               },
                                                        }
                                                }
                                                true
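
The channelmanager.rs changes above all follow the same shape: instead of `if let Err(e) = ...` on a `Result`, callers now match on the returned `ChannelMonitorUpdateStatus`. As a minimal illustrative sketch of the new calling convention (not library code; `watch`, `funding_txo` and `update` stand in for values a real caller already has):

    use lightning::chain::{ChannelMonitorUpdateStatus, Watch};
    use lightning::chain::channelmonitor::ChannelMonitorUpdate;
    use lightning::chain::keysinterface::Sign;
    use lightning::chain::transaction::OutPoint;

    // Illustrative only: `watch` is any chain::Watch implementation.
    fn apply_update<S: Sign, W: Watch<S>>(watch: &W, funding_txo: OutPoint, update: ChannelMonitorUpdate) {
        match watch.update_channel(funding_txo, update) {
            ChannelMonitorUpdateStatus::Completed => {
                // Persisted synchronously; proceed exactly as before.
            },
            ChannelMonitorUpdateStatus::InProgress => {
                // Persistence is async; hold this channel's outbound messages
                // until the update completes. For deployments that persist
                // monitors remotely this is the expected, non-scary case.
            },
            ChannelMonitorUpdateStatus::PermanentFailure => {
                // Persistence failed for good; the channel must be closed.
            },
        }
    }
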
index 20e51ba2c8397199e26234f115da8553adecdf55..43dfef5d188bc51d8d0aa65df6965ef9ccaf7b1a 100644 (file)
@@ -10,7 +10,7 @@
 //! A bunch of useful utilities for building networks of nodes and exchanging messages between
 //! nodes for functional tests.
 
-use chain::{BestBlock, Confirm, Listen, Watch, keysinterface::KeysInterface};
+use chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, keysinterface::KeysInterface};
 use chain::channelmonitor::ChannelMonitor;
 use chain::transaction::OutPoint;
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
@@ -393,7 +393,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
-                               if let Err(_) = chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
+                               if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != ChannelMonitorUpdateStatus::Completed {
                                        panic!();
                                }
                        }
index b0b51bfe8628c20db50d5cf247055bb0abb0df1a..e5378e8ff0f85828b2c705870ac48aaf64dc0789 100644 (file)
@@ -12,7 +12,7 @@
 //! claim outputs on-chain.
 
 use chain;
-use chain::{Confirm, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use chain::chaininterface::LowerBoundedFeeEstimator;
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
@@ -2335,7 +2335,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[3].node.list_channels().len(), 0);
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
-       nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon).unwrap();
+       assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
+               ChannelMonitorUpdateStatus::Completed);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
        check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
 }
@@ -4021,7 +4022,8 @@ fn test_funding_peer_disconnect() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -4389,7 +4391,8 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        check_added_monitors!(nodes[0], 1);
@@ -4501,7 +4504,8 @@ fn test_manager_serialize_deserialize_events() {
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
 
        // After deserializing, make sure the funding_transaction is still held by the channel manager
@@ -4585,7 +4589,8 @@ fn test_simple_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -4697,7 +4702,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        for monitor in node_0_monitors.drain(..) {
-               assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
+               assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+                       ChannelMonitorUpdateStatus::Completed);
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
@@ -7533,7 +7539,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
                }).unwrap().1
        };
        nodes[0].node = &node_state_0;
-       assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+       assert_eq!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].chain_monitor = &monitor;
        nodes[0].chain_source = &chain_source;
 
@@ -8826,7 +8833,8 @@ fn test_bad_secret_hash() {
 fn test_update_err_monitor_lockdown() {
        // Our monitor will lock update of local commitment transaction if a broadcast condition
        // has been fulfilled (either force-close from Channel or block height requiring a HTLC-
-       // timeout). Trying to update monitor after lockdown should return a ChannelMonitorUpdateErr.
+       // timeout). Trying to update the monitor after lockdown should return
+       // ChannelMonitorUpdateStatus::PermanentFailure.
        //
        // This scenario may happen in a watchtower setup, where the watchtower processes a block height
        // triggering a timeout while a slow-block-processing ChannelManager receives a local signed
@@ -8859,7 +8867,7 @@ fn test_update_err_monitor_lockdown() {
                                &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
@@ -8879,8 +8887,8 @@ fn test_update_err_monitor_lockdown() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       if let Err(_) =  watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       assert_eq!(watchtower.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure);
+                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't processed yet timeout
@@ -8923,7 +8931,7 @@ fn test_concurrent_monitor_claim() {
                                &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
@@ -8952,7 +8960,7 @@ fn test_concurrent_monitor_claim() {
                                &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
@@ -8971,9 +8979,9 @@ fn test_concurrent_monitor_claim() {
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Watchtower Alice should already have seen the block and reject the update
-                       if let Err(_) =  watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure);
+                       assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::Completed);
+                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't processed yet timeout
@@ -9746,8 +9754,10 @@ fn test_forwardable_regen() {
        nodes_1_deserialized = nodes_1_deserialized_tmp;
        assert!(nodes_1_read.is_empty());
 
-       assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
-       assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok());
+       assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
+       assert_eq!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[1].node = &nodes_1_deserialized;
        check_added_monitors!(nodes[1], 2);
 
@@ -10211,7 +10221,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        for monitor in monitors {
                // On startup the preimage should have been copied into the non-persisted monitor:
                assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
-               nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+               assert_eq!(nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor),
+                       ChannelMonitorUpdateStatus::Completed);
        }
        check_added_monitors!(nodes[3], 2);
 
index 02a3185e1509778ba13d38daa9d205fcbe07c3f8..18c8e3803521494b86e49e1e063f76f84b87d09c 100644 (file)
@@ -11,7 +11,7 @@
 //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
 //! payments thereafter.
 
-use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
@@ -436,7 +436,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -643,10 +644,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
                        $chan_manager = nodes_0_deserialized_tmp;
                        assert!(nodes_0_read.is_empty());
 
-                       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+                       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+                               ChannelMonitorUpdateStatus::Completed);
                        if !chan_1_monitor_serialized.0.is_empty() {
                                let funding_txo = chan_1_monitor.as_ref().unwrap().get_funding_txo().0;
-                               assert!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()).is_ok());
+                               assert_eq!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()),
+                                       ChannelMonitorUpdateStatus::Completed);
                        }
                        nodes[0].node = &$chan_manager;
                        check_added_monitors!(nodes[0], if !chan_1_monitor_serialized.0.is_empty() { 2 } else { 1 });
@@ -853,10 +856,10 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        }
 
        // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
-       // returning TemporaryFailure. This should cause the claim event to never make its way to the
+       // returning InProgress. This should cause the claim event to never make its way to the
        // ChannelManager.
        chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        if payment_timeout {
                connect_blocks(&nodes[0], 1);
@@ -881,7 +884,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
        // payment sent event.
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
        for update in mon_updates {
@@ -925,7 +928,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        };
        nodes_0_deserialized = nodes_0_deserialized_tmp;
 
-       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        check_added_monitors!(nodes[0], 1);
        nodes[0].node = &nodes_0_deserialized;
 
@@ -1015,7 +1019,8 @@ fn test_fulfill_restart_failure() {
        };
        nodes_1_deserialized = nodes_1_deserialized_tmp;
 
-       assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+               ChannelMonitorUpdateStatus::Completed);
        check_added_monitors!(nodes[1], 1);
        nodes[1].node = &nodes_1_deserialized;
 
index db8c11e4c1eab56a39858375447e8a9f30593291..7a5b62e00d55226bbccc6bde57dfd3a395a398a9 100644 (file)
@@ -11,7 +11,7 @@
 //! other behavior that exists only on private channels or with a semi-trusted counterparty (eg
 //! LSP).
 
-use chain::{ChannelMonitorUpdateErr, Watch};
+use chain::{ChannelMonitorUpdateStatus, Watch};
 use chain::channelmonitor::ChannelMonitor;
 use chain::keysinterface::{Recipient, KeysInterface};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
@@ -137,8 +137,10 @@ fn test_priv_forwarding_rejection() {
        assert!(nodes_1_read.is_empty());
        nodes_1_deserialized = nodes_1_deserialized_tmp;
 
-       assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
-       assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
+       assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a),
+               ChannelMonitorUpdateStatus::Completed);
+       assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b),
+               ChannelMonitorUpdateStatus::Completed);
        check_added_monitors!(nodes[1], 2);
        nodes[1].node = &nodes_1_deserialized;
 
@@ -624,7 +626,7 @@ fn test_0conf_channel_with_async_monitor() {
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
        let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -634,7 +636,7 @@ fn test_0conf_channel_with_async_monitor() {
 
        let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(bs_signed_locked.len(), 2);
-       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
        match &bs_signed_locked[0] {
                MessageSendEvent::SendFundingSigned { node_id, msg } => {
@@ -680,8 +682,8 @@ fn test_0conf_channel_with_async_monitor() {
                _ => panic!("Unexpected event"),
        };
 
-       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_channel_update);
        nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_channel_update);
@@ -710,12 +712,12 @@ fn test_0conf_channel_with_async_monitor() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
        check_added_monitors!(nodes[0], 1);
 
-       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
        check_added_monitors!(nodes[1], 0);
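
The tail of this test is the whole user-facing contract for async monitor updates in one place: report `InProgress` from the persister, then, once the write actually completes, call `channel_monitor_updated` with the `MonitorUpdateId` the persister was handed. Distilled outside the test harness (a sketch; `chain_monitor`, `funding_txo` and `update_id` are assumed to be in scope):

    // 1) Persist::update_persisted_channel returned
    //    ChannelMonitorUpdateStatus::InProgress for `update_id`, so the
    //    channel's outbound messages are currently being held.
    // 2) When the background write finishes, signal completion:
    chain_monitor.channel_monitor_updated(funding_txo, update_id).unwrap();
    // 3) The ChainMonitor emits MonitorEvent::Completed, which the
    //    ChannelManager consumes (see the MonitorEvent::Completed arm in the
    //    channelmanager.rs diff above) to resume the channel.
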
index 507dc34dfbe44a66a9505adb7e7aba60fca3951c..cdda13183d2b4eb50b2836a61e69bddb0070b134 100644 (file)
@@ -11,7 +11,7 @@
 
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use chain::transaction::OutPoint;
-use chain::{Confirm, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
 use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs};
 use ln::msgs::ChannelMessageHandler;
 use util::enforcing_trait_impls::EnforcingSigner;
@@ -342,7 +342,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
                }
 
-               nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+               assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
+                       ChannelMonitorUpdateStatus::Completed);
                check_added_monitors!(nodes[0], 1);
        }
 
index 820bf31c6e090931634b2c58be1f6254060d7c98..15dd202b9c83a96064257c3fa528592021594eb8 100644 (file)
@@ -46,8 +46,12 @@ pub enum APIError {
                /// A human-readable error message
                err: String
        },
-       /// An attempt to call watch/update_channel returned an Err (ie you did this!), causing the
-       /// attempted action to fail.
+       /// An attempt to call watch/update_channel returned a
+       /// [`ChannelMonitorUpdateStatus::InProgress`], indicating that persistence of a monitor
+       /// update is awaiting asynchronous resolution. Once it resolves, the attempted action should
+       /// complete automatically.
+       ///
+       /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
        MonitorUpdateFailed,
        /// [`KeysInterface::get_shutdown_scriptpubkey`] returned a shutdown scriptpubkey incompatible
        /// with the channel counterparty as negotiated in [`InitFeatures`].
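
Given the re-documented meaning, `APIError::MonitorUpdateFailed` is now informational rather than fatal. A hedged sketch of how calling code might treat it (assuming `res: Result<(), APIError>` came back from some `ChannelManager` call):

    use lightning::util::errors::APIError;

    match res {
        Ok(()) => { /* the action completed immediately */ },
        Err(APIError::MonitorUpdateFailed) => {
            // A monitor update is still pending persistence; the action will
            // finish on its own once that persistence completes, so there is
            // nothing further to do here.
        },
        Err(e) => panic!("unexpected API error: {:?}", e),
    }
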
index d04c3430bcc90de0ec7211c4ff12be4a8e2d6bd6..da55c38164516aa75468f7d841cdb720a61d9ac9 100644 (file)
@@ -72,15 +72,19 @@ impl<ChannelSigner: Sign, K: KVStorePersister> Persist<ChannelSigner> for K {
        // A PermanentFailure implies we need to shut down since we're force-closing channels without
        // even broadcasting!
 
-       fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               self.persist(&key, monitor)
-                       .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
+               match self.persist(&key, monitor) {
+                       Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+                       Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
+               }
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               self.persist(&key, monitor)
-                       .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
+               match self.persist(&key, monitor) {
+                       Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+                       Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
+               }
        }
 }
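
The `KVStorePersister` blanket impl above still completes every write synchronously. For a store that genuinely persists in the background, the same `Persist` trait can now return `InProgress` instead of an error. A minimal sketch under stated assumptions (the `AsyncPersister` type and its channel-based queue are hypothetical, not part of the library):

    use std::sync::mpsc::Sender;

    use lightning::chain::{self, ChannelMonitorUpdateStatus};
    use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
    use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
    use lightning::chain::keysinterface::Sign;
    use lightning::chain::transaction::OutPoint;
    use lightning::util::ser::Writeable;

    /// Hypothetical persister that hands serialized monitors to a background
    /// thread for durable storage; all names here are illustrative.
    struct AsyncPersister {
        write_queue: Sender<(OutPoint, Vec<u8>, MonitorUpdateId)>,
    }

    impl<ChannelSigner: Sign> Persist<ChannelSigner> for AsyncPersister {
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
            // Queue the write; nothing is durable yet, so report InProgress.
            // Whoever drains the queue must later call
            // ChainMonitor::channel_monitor_updated(funding_txo, update_id).
            self.write_queue.send((funding_txo, monitor.encode(), update_id)).unwrap();
            ChannelMonitorUpdateStatus::InProgress
        }

        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
            // Same pattern for incremental updates: persist the full monitor
            // off-thread and complete the update asynchronously.
            self.write_queue.send((funding_txo, monitor.encode(), update_id)).unwrap();
            ChannelMonitorUpdateStatus::InProgress
        }
    }
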
index 4ce51fc8bedc252c9eb59f45aa2564db29b07b5c..9b2f222c519f9c672bf6d538ef23898de2e8020f 100644 (file)
@@ -126,7 +126,7 @@ impl<'a> TestChainMonitor<'a> {
        }
 }
 impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
@@ -140,7 +140,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                self.chain_monitor.watch_channel(funding_txo, new_monitor)
        }
 
-       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
                // Every monitor update should survive roundtrip
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
@@ -178,10 +178,10 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 }
 
 pub struct TestPersister {
-       pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+       pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
        /// If this is set to Some(), after the next return, we'll always return this until update_ret
        /// is changed:
-       pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
+       pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
        /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
        /// MonitorUpdateId here.
        pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
@@ -192,23 +192,23 @@ pub struct TestPersister {
 impl TestPersister {
        pub fn new() -> Self {
                Self {
-                       update_ret: Mutex::new(Ok(())),
+                       update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
                        next_update_ret: Mutex::new(None),
                        chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
                        offchain_monitor_updates: Mutex::new(HashMap::new()),
                }
        }
 
-       pub fn set_update_ret(&self, ret: Result<(), chain::ChannelMonitorUpdateErr>) {
+       pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
                *self.update_ret.lock().unwrap() = ret;
        }
 
-       pub fn set_next_update_ret(&self, next_ret: Option<Result<(), chain::ChannelMonitorUpdateErr>>) {
+       pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
                *self.next_update_ret.lock().unwrap() = next_ret;
        }
 }
 impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
                        *self.update_ret.lock().unwrap() = next_ret;
@@ -216,7 +216,7 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
                ret
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
                        *self.update_ret.lock().unwrap() = next_ret;