use bitcoin::hash_types::{BlockHash, WPubkeyHash};
use lightning::chain;
-use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
}
}
impl chain::Watch<EnforcingSigner> for TestChainMonitor {
- fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
let mut ser = VecWriter(Vec::new());
monitor.write(&mut ser).unwrap();
if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
self.chain_monitor.watch_channel(funding_txo, monitor)
}
- fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
let mut map_lock = self.latest_monitors.lock().unwrap();
let mut map_entry = match map_lock.entry(funding_txo) {
hash_map::Entry::Occupied(entry) => entry,
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
- Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
+ Arc::new(TestPersister {
+ update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+ }), Arc::clone(&keys_manager)));
let mut config = UserConfig::default();
config.channel_config.forwarding_fee_proportional_millionths = 0;
let keys_manager = Arc::clone(& $keys_manager);
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
- Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
+ Arc::new(TestPersister {
+ update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
+ }), Arc::clone(& $keys_manager)));
let mut config = UserConfig::default();
config.channel_config.forwarding_fee_proportional_millionths = 0;
let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
for (funding_txo, mon) in monitors.drain() {
- assert!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon).is_ok());
+ assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
+ ChannelMonitorUpdateStatus::Completed);
}
res
} }
// bit-twiddling mutations to have similar effects. This is probably overkill, but no
// harm in doing so.
- 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
- 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
- 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
+ 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+ 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+ 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::InProgress,
+ 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+ 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
+ 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed,
0x08 => {
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
// after we resolve all pending events.
// First make sure there are no pending monitor updates, resetting the error state
// and calling force_channel_monitor_updated for each monitor.
- *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
- *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
- *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
+ *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+ *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
+ *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
use lightning::chain;
-use lightning::chain::{BestBlock, Confirm, Listen};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::chainmonitor;
use lightning::chain::transaction::OutPoint;
let broadcast = Arc::new(TestBroadcaster{ txn_broadcasted: Mutex::new(Vec::new()) });
let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone(),
- Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) })));
+ Arc::new(TestPersister { update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) })));
let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), inbound_payment_key: KeyMaterial(inbound_payment_key.try_into().unwrap()), counter: AtomicU64::new(0) });
let mut config = UserConfig::default();
use std::sync::Mutex;
pub struct TestPersister {
- pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+ pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
}
impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
- fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
self.update_ret.lock().unwrap().clone()
}
- fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
self.update_ret.lock().unwrap().clone()
}
}
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::hashes::hex::FromHex;
use bitcoin::{Txid, TxMerkleNode};
- use lightning::chain::ChannelMonitorUpdateErr;
+ use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::Persist;
use lightning::chain::transaction::OutPoint;
use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
index: 0
};
match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
- Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
+ ChannelMonitorUpdateStatus::PermanentFailure => {},
_ => panic!("unexpected result from persisting new channel")
}
index: 0
};
match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
- Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
+ ChannelMonitorUpdateStatus::PermanentFailure => {},
_ => panic!("unexpected result from persisting new channel")
}
use bitcoin::hash_types::Txid;
use chain;
-use chain::{ChannelMonitorUpdateErr, Filter, WatchedOutput};
+use chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, LATENCY_GRACE_PERIOD_BLOCKS};
use chain::transaction::{OutPoint, TransactionData};
///
/// Each method can return three possible values:
/// * If persistence (including any relevant `fsync()` calls) happens immediately, the
-/// implementation should return `Ok(())`, indicating normal channel operation should continue.
+/// implementation should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal
+/// channel operation should continue.
/// * If persistence happens asynchronously, implementations should first ensure the
/// [`ChannelMonitor`] or [`ChannelMonitorUpdate`] are written durably to disk, and then return
-/// `Err(ChannelMonitorUpdateErr::TemporaryFailure)` while the update continues in the
-/// background. Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be
-/// called with the corresponding [`MonitorUpdateId`].
+/// [`ChannelMonitorUpdateStatus::InProgress`] while the update continues in the background.
+/// Once the update completes, [`ChainMonitor::channel_monitor_updated`] should be called with
+/// the corresponding [`MonitorUpdateId`].
///
/// Note that unlike the direct [`chain::Watch`] interface,
/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
/// * If persistence fails for some reason, implementations should return
-/// `Err(ChannelMonitorUpdateErr::PermanentFailure)`, in which case the channel will likely be
+/// [`ChannelMonitorUpdateStatus::PermanentFailure`], in which case the channel will likely be
/// closed without broadcasting the latest state. See
-/// [`ChannelMonitorUpdateErr::PermanentFailure`] for more details.
+/// [`ChannelMonitorUpdateStatus::PermanentFailure`] for more details.
pub trait Persist<ChannelSigner: Sign> {
/// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
/// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
/// and the stored channel data). Note that you **must** persist every new monitor to disk.
///
/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
- /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
+ /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
///
/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
- /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+ /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+ fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
/// whereas updates are small and `O(1)`.
///
/// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
- /// if you return [`ChannelMonitorUpdateErr::TemporaryFailure`].
+ /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
///
/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
- /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+ /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+ fn update_persisted_channel(&self, channel_id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
}
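// A minimal sketch of a fully-synchronous `Persist` implementation, using a
// hypothetical `DurableStore` whose `write_monitor` method blocks until the data
// (including any required `fsync()`) has hit disk. A successful write maps to
// `Completed` and an unrecoverable write error to `PermanentFailure`; assumes
// `lightning::util::ser::Writeable` is in scope for `encode()`.
struct DurableStore {}
impl DurableStore {
	fn write_monitor(&self, _key: OutPoint, _bytes: Vec<u8>) -> Result<(), std::io::Error> {
		// Hypothetical: write to a temp file, fsync, then atomically rename.
		Ok(())
	}
}
impl<ChannelSigner: Sign> Persist<ChannelSigner> for DurableStore {
	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus {
		match self.write_monitor(channel_id, data.encode()) {
			Ok(()) => ChannelMonitorUpdateStatus::Completed,
			Err(_) => ChannelMonitorUpdateStatus::PermanentFailure,
		}
	}
	fn update_persisted_channel(&self, channel_id: OutPoint, _update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus {
		// Rewriting the full monitor is always correct, just not O(1) in its size.
		self.persist_new_channel(channel_id, data, update_id)
	}
}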
struct MonitorHolder<ChannelSigner: Sign> {
/// The full set of pending monitor updates for this Channel.
///
/// Note that this lock must be held during updates to prevent a race where we call
- /// update_persisted_channel, the user returns a TemporaryFailure, and then calls
- /// channel_monitor_updated immediately, racing our insertion of the pending update into the
- /// contained Vec.
+ /// update_persisted_channel, the user returns a
+ /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
+ /// immediately, racing our insertion of the pending update into the contained Vec.
///
/// Beyond the synchronization of updates themselves, we cannot handle user events until after
/// any chain updates have been stored on disk. Thus, we scan this list when returning updates
if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
                // If there are no ChainSync persists awaiting completion, go ahead and
// set last_chain_persist_height here - we wouldn't want the first
- // TemporaryFailure to always immediately be considered "overly delayed".
+ // InProgress to always immediately be considered "overly delayed".
monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
}
}
log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
- Ok(()) =>
+ ChannelMonitorUpdateStatus::Completed =>
log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
- Err(ChannelMonitorUpdateErr::PermanentFailure) => {
+ ChannelMonitorUpdateStatus::PermanentFailure => {
monitor_state.channel_perm_failed.store(true, Ordering::Release);
self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
},
- Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+ ChannelMonitorUpdateStatus::InProgress => {
log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
pending_monitor_updates.push(update_id);
},
}
/// Indicates the persistence of a [`ChannelMonitor`] has completed after
- /// [`ChannelMonitorUpdateErr::TemporaryFailure`] was returned from an update operation.
+ /// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
///
/// Thus, the anticipated use is, at a high level:
/// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
/// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
- /// returning [`ChannelMonitorUpdateErr::TemporaryFailure`],
+ /// returning [`ChannelMonitorUpdateStatus::InProgress`],
/// 2) once all remote copies are updated, you call this function with the
/// `completed_update_id` that completed, and once all pending updates have completed the
/// channel will be re-enabled.
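// A minimal sketch of the asynchronous flow above, using a hypothetical
// `RemoteBackupPersister`: the local copy is assumed to already be durable, the
// remote (e.g. watchtower) write is merely queued, and `InProgress` is returned.
// Whatever drains `pending` must call `ChainMonitor::channel_monitor_updated` with
// the stashed `MonitorUpdateId` once the remote copy is durable, which queues the
// `MonitorEvent::Completed` that lets the channel resume. Assumes
// `lightning::util::ser::Writeable` is in scope for `encode()`.
struct RemoteBackupPersister {
	// (funding outpoint, update id, serialized monitor) awaiting remote durability
	pending: std::sync::Mutex<Vec<(OutPoint, MonitorUpdateId, Vec<u8>)>>,
}
impl<ChannelSigner: Sign> Persist<ChannelSigner> for RemoteBackupPersister {
	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus {
		self.pending.lock().unwrap().push((channel_id, update_id, data.encode()));
		ChannelMonitorUpdateStatus::InProgress
	}
	fn update_persisted_channel(&self, channel_id: OutPoint, _update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus {
		self.persist_new_channel(channel_id, data, update_id)
	}
}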
if monitor_is_pending_updates || monitor_data.channel_perm_failed.load(Ordering::Acquire) {
// If there are still monitor updates pending (or an old monitor update
// finished after a later one perm-failed), we cannot yet construct an
- // UpdateCompleted event.
+                               // event of type `MonitorEvent::Completed`.
return Ok(());
}
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
funding_txo,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
}], monitor_data.monitor.get_counterparty_node_id()));
pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted {
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
funding_txo,
monitor_update_id,
}], counterparty_node_id));
///
/// Note that we persist the given `ChannelMonitor` while holding the `ChainMonitor`
/// monitors lock.
- fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus {
let mut monitors = self.monitors.write().unwrap();
let entry = match monitors.entry(funding_outpoint) {
hash_map::Entry::Occupied(_) => {
log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
- return Err(ChannelMonitorUpdateErr::PermanentFailure)},
+ return ChannelMonitorUpdateStatus::PermanentFailure
+ },
hash_map::Entry::Vacant(e) => e,
};
log_trace!(self.logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
let update_id = MonitorUpdateId::from_new_monitor(&monitor);
let mut pending_monitor_updates = Vec::new();
let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
- if persist_res.is_err() {
- log_error!(self.logger, "Failed to persist new ChannelMonitor for channel {}: {:?}", log_funding_info!(monitor), persist_res);
- } else {
- log_trace!(self.logger, "Finished persisting new ChannelMonitor for channel {}", log_funding_info!(monitor));
- }
- if persist_res == Err(ChannelMonitorUpdateErr::PermanentFailure) {
- return persist_res;
- } else if persist_res.is_err() {
- pending_monitor_updates.push(update_id);
+ match persist_res {
+ ChannelMonitorUpdateStatus::InProgress => {
+ log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
+ pending_monitor_updates.push(update_id);
+ },
+ ChannelMonitorUpdateStatus::PermanentFailure => {
+ log_error!(self.logger, "Persistence of new ChannelMonitor for channel {} failed", log_funding_info!(monitor));
+ return persist_res;
+ },
+ ChannelMonitorUpdateStatus::Completed => {
+ log_info!(self.logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
+ }
}
if let Some(ref chain_source) = self.chain_source {
monitor.load_outputs_to_watch(chain_source);
/// Note that we persist the given `ChannelMonitor` update while holding the
/// `ChainMonitor` monitors lock.
- fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
// Update the monitor that watches the channel referred to by the given outpoint.
let monitors = self.monitors.read().unwrap();
match monitors.get(&funding_txo) {
#[cfg(any(test, fuzzing))]
panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
#[cfg(not(any(test, fuzzing)))]
- Err(ChannelMonitorUpdateErr::PermanentFailure)
+ ChannelMonitorUpdateStatus::PermanentFailure
},
Some(monitor_state) => {
let monitor = &monitor_state.monitor;
let update_id = MonitorUpdateId::from_monitor_update(&update);
let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
let persist_res = self.persister.update_persisted_channel(funding_txo, &Some(update), monitor, update_id);
- if let Err(e) = persist_res {
- if e == ChannelMonitorUpdateErr::TemporaryFailure {
+ match persist_res {
+ ChannelMonitorUpdateStatus::InProgress => {
pending_monitor_updates.push(update_id);
- } else {
+ log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+ },
+ ChannelMonitorUpdateStatus::PermanentFailure => {
monitor_state.channel_perm_failed.store(true, Ordering::Release);
- }
- log_error!(self.logger, "Failed to persist ChannelMonitor update for channel {}: {:?}", log_funding_info!(monitor), e);
- } else {
- log_trace!(self.logger, "Finished persisting ChannelMonitor update for channel {}", log_funding_info!(monitor));
+ log_error!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} failed", log_funding_info!(monitor));
+ },
+ ChannelMonitorUpdateStatus::Completed => {
+ log_debug!(self.logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+ },
}
if update_res.is_err() {
- Err(ChannelMonitorUpdateErr::PermanentFailure)
+ ChannelMonitorUpdateStatus::PermanentFailure
} else if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
- Err(ChannelMonitorUpdateErr::PermanentFailure)
+ ChannelMonitorUpdateStatus::PermanentFailure
} else {
persist_res
}
use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
- use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
+ use chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
use ln::channelmanager::{self, PaymentSendFailure};
use ln::functional_test_utils::*;
let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
assert_eq!(persistences.len(), 1);
// Temp-fail the block connection which will hold the channel-closed event
chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
// channel is now closed, but the ChannelManager doesn't know that yet.
// If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
// the update through to the ChannelMonitor which will refuse it (as the channel is closed).
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)),
true, APIError::ChannelUnavailable { ref err },
assert!(err.contains("ChannelMonitor storage failure")));
create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
connect_blocks(&nodes[0], 1);
// Before processing events, the ChannelManager will still think the Channel is open and
/// increasing and increase by one for each new update, with one exception specified below.
///
/// This sequence number is also used to track up to which points updates which returned
- /// [`ChannelMonitorUpdateErr::TemporaryFailure`] have been applied to all copies of a given
+ /// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
///
/// The only instance where update_id values are not strictly increasing is the case where we
/// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
/// its docs for more details.
///
- /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
}
CommitmentTxConfirmed(OutPoint),
/// Indicates a [`ChannelMonitor`] update has completed. See
- /// [`ChannelMonitorUpdateErr::TemporaryFailure`] for more information on how this is used.
+ /// [`ChannelMonitorUpdateStatus::InProgress`] for more information on how this is used.
///
- /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
- UpdateCompleted {
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
+ Completed {
/// The funding outpoint of the [`ChannelMonitor`] that was updated
funding_txo: OutPoint,
/// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
},
/// Indicates a [`ChannelMonitor`] update has failed. See
- /// [`ChannelMonitorUpdateErr::PermanentFailure`] for more information on how this is used.
+ /// [`ChannelMonitorUpdateStatus::PermanentFailure`] for more information on how this is used.
///
- /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+ /// [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
UpdateFailed(OutPoint),
}
impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
- // Note that UpdateCompleted and UpdateFailed are currently never serialized to disk as they are
+ // Note that Completed and UpdateFailed are currently never serialized to disk as they are
// generated only in ChainMonitor
- (0, UpdateCompleted) => {
+ (0, Completed) => {
(0, funding_txo, required),
(2, monitor_update_id, required),
},
/// the Channel was out-of-date.
///
/// You may also use this to broadcast the latest local commitment transaction, either because
- /// a monitor update failed with [`ChannelMonitorUpdateErr::PermanentFailure`] or because we've
- /// fallen behind (i.e we've received proof that our counterparty side knows a revocation
+ /// a monitor update failed with [`ChannelMonitorUpdateStatus::PermanentFailure`] or because we've
+ /// fallen behind (i.e. we've received proof that our counterparty side knows a revocation
/// secret we gave them that they shouldn't know).
///
/// Broadcasting these transactions in the second case is UNSAFE, as they allow counterparty
/// may be to contact the other node operator out-of-band to coordinate other options available
/// to you. In any-case, the choice is up to you.
///
- /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+ /// [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
where L::Target: Logger {
self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
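// A minimal sketch of the first (safe) use described above; the helper name is
// hypothetical. After a monitor update returns
// `ChannelMonitorUpdateStatus::PermanentFailure`, the holder's latest commitment
// transactions can be handed directly to a `BroadcasterInterface`.
fn broadcast_latest_holder_txn<Signer: Sign, B: Deref, L: Deref>(
	monitor: &ChannelMonitor<Signer>, broadcaster: &B, logger: &L
) where B::Target: BroadcasterInterface, L::Target: Logger {
	for tx in monitor.get_latest_holder_commitment_txn(logger) {
		broadcaster.broadcast_transaction(&tx);
	}
}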
fn get_relevant_txids(&self) -> Vec<Txid>;
}
-/// An error enum representing a failure to persist a channel monitor update.
+/// An enum representing the status of a channel monitor update persistence.
#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ChannelMonitorUpdateErr {
+pub enum ChannelMonitorUpdateStatus {
+ /// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
+ /// have been updated.
+ ///
+ /// This includes performing any `fsync()` calls required to ensure the update is guaranteed to
+ /// be available on restart even if the application crashes.
+ Completed,
	/// Used to indicate a temporary failure (e.g. connection to a watchtower or remote backup of
/// our state failed, but is expected to succeed at some point in the future).
///
/// Such a failure will "freeze" a channel, preventing us from revoking old states or
/// submitting new commitment transactions to the counterparty. Once the update(s) which failed
- /// have been successfully applied, a [`MonitorEvent::UpdateCompleted`] can be used to restore
- /// the channel to an operational state.
+ /// have been successfully applied, a [`MonitorEvent::Completed`] can be used to restore the
+ /// channel to an operational state.
///
/// Note that a given [`ChannelManager`] will *never* re-generate a [`ChannelMonitorUpdate`].
/// If you return this error you must ensure that it is written to disk safely before writing
/// attempting to claim it on this channel) and those updates must still be persisted.
///
/// No updates to the channel will be made which could invalidate other [`ChannelMonitor`]s
- /// until a [`MonitorEvent::UpdateCompleted`] is provided, even if you return no error on a
- /// later monitor update for the same channel.
+ /// until a [`MonitorEvent::Completed`] is provided, even if you return no error on a later
+ /// monitor update for the same channel.
///
/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
/// remote location (with local copies persisted immediately), it is anticipated that all
- /// updates will return TemporaryFailure until the remote copies could be updated.
+	/// updates will return [`InProgress`] until the remote copies can be updated.
///
- /// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+ /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
+ /// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- TemporaryFailure,
+ InProgress,
/// Used to indicate no further channel monitor updates will be allowed (likely a disk failure
/// or a remote copy of this [`ChannelMonitor`] is no longer reachable and thus not updatable).
///
/// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
/// lagging behind on block processing.
///
- /// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+ /// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
PermanentFailure,
}
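// A minimal sketch (the helper name is hypothetical) of how a caller of
// `Watch::update_channel` is expected to react to each status, mirroring the
// semantics documented above.
fn react_to_monitor_update_status(status: ChannelMonitorUpdateStatus) {
	match status {
		ChannelMonitorUpdateStatus::Completed => {
			// Persistence finished durably; normal channel operation continues.
		},
		ChannelMonitorUpdateStatus::InProgress => {
			// The channel is frozen (no new commitments or revocations) until a
			// `MonitorEvent::Completed` for this update is surfaced.
		},
		ChannelMonitorUpdateStatus::PermanentFailure => {
			// The channel must be force-closed, without broadcasting the latest state.
		},
	}
}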
/// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
/// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
/// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
-/// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle
+/// funds in the channel. See [`ChannelMonitorUpdateStatus`] for more details about how to handle
/// multiple instances.
///
-/// [`PermanentFailure`]: ChannelMonitorUpdateErr::PermanentFailure
+/// [`PermanentFailure`]: ChannelMonitorUpdateStatus::PermanentFailure
pub trait Watch<ChannelSigner: Sign> {
/// Watches a channel identified by `funding_txo` using `monitor`.
///
/// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
/// calling [`block_connected`] and [`block_disconnected`] on the monitor.
///
- /// Note: this interface MUST error with [`ChannelMonitorUpdateErr::PermanentFailure`] if
+ /// Note: this interface MUST error with [`ChannelMonitorUpdateStatus::PermanentFailure`] if
/// the given `funding_txo` has previously been registered via `watch_channel`.
///
/// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
/// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
/// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
- fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
/// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
///
/// Implementations must call [`update_monitor`] with the given update. See
- /// [`ChannelMonitorUpdateErr`] for invariants around returning an error.
+ /// [`ChannelMonitorUpdateStatus`] for invariants around returning an error.
///
/// [`update_monitor`]: channelmonitor::ChannelMonitor::update_monitor
- fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
+ fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
/// Returns any monitor events since the last call. Subsequent calls must only return new
/// events.
/// to disk.
///
/// For details on asynchronous [`ChannelMonitor`] updating and returning
- /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`].
+ /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
}
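// A minimal sketch (the helper name is hypothetical) of draining monitor events
// from a `Watch` implementation and reacting to the variants discussed above.
fn drain_monitor_events<ChannelSigner: Sign, W: Watch<ChannelSigner>>(watch: &W) {
	for (_funding_txo, events, _counterparty_node_id) in watch.release_pending_monitor_events() {
		for event in events {
			match event {
				MonitorEvent::Completed { .. } => {
					// All persistence up to the contained `monitor_update_id` is durable;
					// a channel frozen by `InProgress` may now resume.
				},
				MonitorEvent::UpdateFailed(_outpoint) => {
					// Treated like `PermanentFailure`: the channel should be force-closed.
				},
				_ => { /* HTLC and commitment-confirmation events are handled elsewhere */ },
			}
		}
	}
}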
/// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
/// should not block on I/O. Implementations should instead queue the newly monitored data to be
/// processed later. Then, in order to block until the data has been processed, any [`Watch`]
-/// invocation that has called the `Filter` must return [`TemporaryFailure`].
+/// invocation that has called the `Filter` must return [`InProgress`].
///
-/// [`TemporaryFailure`]: ChannelMonitorUpdateErr::TemporaryFailure
+/// [`InProgress`]: ChannelMonitorUpdateStatus::InProgress
/// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
/// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
pub trait Filter {
// You may not use this file except in accordance with one or both of these
// licenses.
-//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
+//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.
use bitcoin::network::constants::Network;
use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use chain::transaction::OutPoint;
-use chain::{ChannelMonitorUpdateErr, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
use ln::channel::AnnouncementSigsState;
use ln::msgs;
create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable {..}, {});
check_added_monitors!(nodes[0], 2);
&mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
- assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
+ assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
chain_mon
};
let header = BlockHeader {
};
chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
- // Set the persister's return value to be a TemporaryFailure.
- persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	// Set the persister's return value to be `InProgress`.
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// Try to update ChannelMonitor
nodes[1].node.claim_funds(preimage);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- // Check that even though the persister is returning a TemporaryFailure,
+			// Check that even though the persister is returning `InProgress`,
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
- if let Err(ChannelMonitorUpdateErr::PermanentFailure) = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
- logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Failed to persist ChannelMonitor update for channel [0-9a-f]*: TemporaryFailure").unwrap(), 1);
- if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+ if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, update.clone()) {} else { panic!("Expected monitor error to be permanent"); }
+ logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
} else { assert!(false); };
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
{
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), false, APIError::MonitorUpdateFailed, {});
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
// Now set it to failed again...
let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
{
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
check_added_monitors!(nodes[0], 1);
}
// * First we route a payment, then get a temporary monitor update failure when trying to
// route a second payment. We then claim the first payment.
// * If disconnect_count is set, we will disconnect at this point (which is likely as
- // TemporaryFailure likely indicates net disconnect which resulted in failing to update
- // the ChannelMonitor on a watchtower).
+ // InProgress likely indicates net disconnect which resulted in failing to update the
+ // ChannelMonitor on a watchtower).
// * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
// immediately, otherwise we wait disconnect and deliver them via the reconnect
// channel_reestablish processing (ie disconnect_count & 16 makes no sense if
// Now try to send a second payment which will fail to send
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
{
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)), false, APIError::MonitorUpdateFailed, {});
check_added_monitors!(nodes[0], 1);
}
}
// Now fix monitor updating...
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
assert!(updates.update_fee.is_none());
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
_ => panic!("Unexpected event"),
}
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
check_added_monitors!(nodes[1], 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
+ nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[0], 1);
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Now fail monitor updating.
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
}
- chanmon_cfgs[1].persister.set_update_ret(Ok(())); // We succeed in updating the monitor for the first channel
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
// Restore monitor updating, ensuring we immediately get a fail-back update and a
// update_add update.
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
// Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
// then restore channel monitor updates.
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
+ nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[1], 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
// nodes[1] should be AwaitingRAA here!
// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
// update.
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
// Now un-fail the monitor, which will result in B sending its original commitment update,
// receiving the commitment update from A, and the resulting commitment dances.
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
check_added_monitors!(nodes[0], 1);
}
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let payment_event = SendEvent::from_event(events.pop().unwrap());
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let payment_event = SendEvent::from_event(events.pop().unwrap());
assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// Deliver the final RAA for the first payment, which does not require a response. RAAs
// generally require a commitment_signed, so the fact that we're expecting an opposite response
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
// paused, so forward shouldn't succeed until we call channel_monitor_updated().
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
check_added_monitors!(nodes[1], 1);
assert_eq!(events.len(), 0);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
check_added_monitors!(nodes[0], 0);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
// (for the path 0 -> 2 -> 3) fails.
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
- chanmon_cfgs[0].persister.set_next_update_ret(Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+ chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress));
// Now check that we get the right return value, indicating that the first path succeeded but
// the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
} else { panic!(); }
check_added_monitors!(nodes[0], 2);
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
// Pass the first HTLC of the payment along to nodes[3].
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
check_added_monitors!(nodes[0], 0);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.claim_funds(payment_preimage_0);
check_added_monitors!(nodes[0], 1);
expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
nodes[0].node = &nodes_0_deserialized;
assert!(nodes_0_read.is_empty());
- nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
} else {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
// If we finish updating the monitor, we should free the holding cell right away (this did
// not occur prior to #756).
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
check_closed_broadcast!(nodes[0], true);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// `claim_funds` results in a ChannelMonitorUpdate.
nodes[1].node.claim_funds(payment_preimage_1);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
// which had some asserts that prevented it from being called twice.
nodes[1].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
use bitcoin::{LockTime, secp256k1, Sequence};
use chain;
-use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
+use chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
///
/// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
/// as they will result in over-/re-payment. These HTLCs have all either been successfully sent (in the
- /// case of Ok(())) or will send once a [`MonitorEvent::UpdateCompleted`] is provided for the
+ /// case of Ok(())) or will be sent once a [`MonitorEvent::Completed`] is provided for the
/// next-hop channel with the latest update_id.
PartialFailure {
/// The errors themselves, in the same order as the route hops.
macro_rules! handle_monitor_err {
($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
match $err {
- ChannelMonitorUpdateErr::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+ ChannelMonitorUpdateStatus::PermanentFailure => {
+ log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
// TODO: $failed_fails is dropped here, which will cause other channels to hit the
// chain in a confused state! We need to move them into the ChannelMonitor which
$chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
(res, true)
},
- ChannelMonitorUpdateErr::TemporaryFailure => {
- log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
+ ChannelMonitorUpdateStatus::InProgress => {
+ log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
log_bytes!($chan_id[..]),
if $resend_commitment && $resend_raa {
match $action_type {
$chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
},
+ ChannelMonitorUpdateStatus::Completed => {
+ (Ok(()), false)
+ },
}
};
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
};
}
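// A minimal, self-contained sketch of the three-way decision handle_monitor_err! encodes
// above (stub types, not LDK's actual Channel API): Completed lets the operation proceed,
// InProgress pauses the channel until the persister reports completion, and
// PermanentFailure force-closes it.
#[derive(Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress, PermanentFailure }

#[derive(Default)]
struct StubChannel { paused: bool, force_closed: bool }

impl StubChannel {
    fn handle_monitor_update_status(&mut self, status: UpdateStatus) -> Result<(), &'static str> {
        match status {
            UpdateStatus::Completed => Ok(()),
            UpdateStatus::InProgress => {
                // Mirrors ChannelMonitorUpdateStatus::InProgress: hold messages and resume
                // once the pending monitor update completes.
                self.paused = true;
                Err("Failed to update ChannelMonitor")
            },
            UpdateStatus::PermanentFailure => {
                // Mirrors ChannelMonitorUpdateStatus::PermanentFailure: the channel cannot be
                // safely continued and is closed.
                self.force_closed = true;
                Err("monitor update failed permanently")
            },
        }
    }
}

fn main() {
    let mut chan = StubChannel::default();
    assert!(chan.handle_monitor_update_status(UpdateStatus::Completed).is_ok());
    assert!(chan.handle_monitor_update_status(UpdateStatus::InProgress).is_err());
    assert!(chan.paused && !chan.force_closed);

    let mut chan2 = StubChannel::default();
    assert!(chan2.handle_monitor_update_status(UpdateStatus::PermanentFailure).is_err());
    assert!(chan2.force_closed);
}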
-macro_rules! return_monitor_err {
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
- };
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- }
-}
-
-// Does not break in case of TemporaryFailure!
-macro_rules! maybe_break_monitor_err {
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
- (e, ChannelMonitorUpdateErr::PermanentFailure) => {
- break e;
- },
- (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
- }
- }
-}
-
macro_rules! send_channel_ready {
($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
// only case where we can get a new ChannelMonitorUpdate would be if we also
// have some commitment updates to send as well.
assert!($commitment_update.is_some());
- if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- let mut order = $order;
- if $raa.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
+ match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ // channel_reestablish doesn't guarantee the order it returns is sensical
+ // for the messages it returns, but if we're setting what messages to
+ // re-transmit on monitor update success, we need to make sure it is sane.
+ let mut order = $order;
+ if $raa.is_none() {
+ order = RAACommitmentOrder::CommitmentFirst;
+ }
+ break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
}
- break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
}
}
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
- if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
- let (result, is_permanent) =
- handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, channel_state, chan_entry);
- break result;
- }
+ let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+ let (result, is_permanent) =
+ handle_monitor_err!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ if is_permanent {
+ remove_channel!(self, channel_state, chan_entry);
+ break result;
}
}
channel_state, chan)
} {
Some((update_add, commitment_signed, monitor_update)) => {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
- // Note that MonitorUpdateFailed here indicates (per function docs)
- // that we will resend the commitment update once monitor updating
- // is restored. Therefore, we must return an error indicating that
- // it is unsafe to retry the payment wholesale, which we do in the
- // send_payment check for MonitorUpdateFailed, below.
- insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
- return Err(APIError::MonitorUpdateFailed);
+ let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+ let chan_id = chan.get().channel_id();
+ match (update_err,
+ handle_monitor_err!(self, update_err, channel_state, chan,
+ RAACommitmentOrder::CommitmentFirst, false, true))
+ {
+ (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+ (ChannelMonitorUpdateStatus::Completed, Ok(())) => {
+ insert_outbound_payment!();
+ },
+ (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+ // Note that MonitorUpdateFailed here indicates (per function docs)
+ // that we will resend the commitment update once monitor updating
+ // is restored. Therefore, we must return an error indicating that
+ // it is unsafe to retry the payment wholesale, which we do in the
+ // send_payment check for MonitorUpdateFailed, below.
+ insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
+ return Err(APIError::MonitorUpdateFailed);
+ },
+ _ => unreachable!(),
}
- insert_outbound_payment!();
- log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
+ log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: path.first().unwrap().pubkey,
updates: msgs::CommitmentUpdate {
continue;
}
};
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
- continue;
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+ continue;
+ }
}
log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
};
let ret_err = match res {
Ok(Some((update_fee, commitment_signed, monitor_update))) => {
- if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
- let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
- if drop { retain_channel = false; }
- res
- } else {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: Some(update_fee),
- commitment_signed,
- },
- });
- Ok(())
+ match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: Some(update_fee),
+ commitment_signed,
+ },
+ });
+ Ok(())
+ },
+ e => {
+ let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+ if drop { retain_channel = false; }
+ res
+ }
}
},
Ok(None) => Ok(()),
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
Ok(msgs_monitor_option) => {
if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
- "Failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- return ClaimFundsFromHop::MonitorUpdateFail(
- chan.get().get_counterparty_node_id(),
- handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
- Some(htlc_value_msat)
- );
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+ "Failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ return ClaimFundsFromHop::MonitorUpdateFail(
+ chan.get().get_counterparty_node_id(),
+ handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+ Some(htlc_value_msat)
+ );
+ }
}
if let Some((msg, commitment_signed)) = msgs {
log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
}
},
Err((e, monitor_update)) => {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
- "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
- payment_preimage, e);
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+ "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+ payment_preimage, e);
+ },
}
let counterparty_node_id = chan.get().get_counterparty_node_id();
let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
// We update the ChannelMonitor on the backward link, after
// receiving an offchain preimage event from the forward link (the
// event being update_fulfill_htlc).
- if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+ let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
+ if update_res != ChannelMonitorUpdateStatus::Completed {
log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
+ payment_preimage, update_res);
}
// Note that we do *not* set `claimed_htlc` to false here. In fact, this
// totally could be a duplicate claim, but we have no way of knowing
};
// Because we have exclusive ownership of the channel here we can release the channel_state
// lock before watch_channel
- if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
- match e {
- ChannelMonitorUpdateErr::PermanentFailure => {
- // Note that we reply with the new channel_id in error messages if we gave up on the
- // channel, not the temporary_channel_id. This is compatible with ourselves, but the
- // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
- // any messages referencing a previously-closed channel anyway.
- // We do not do a force-close here as that would generate a monitor update for
- // a monitor that we didn't manage to store (and that we don't care about - we
- // don't respond with the funding_signed so the channel can never go on chain).
- let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
- assert!(failed_htlcs.is_empty());
- return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
- },
- ChannelMonitorUpdateErr::TemporaryFailure => {
- // There's no problem signing a counterparty's funding transaction if our monitor
- // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
- // accepted payment from yet. We do, however, need to wait to send our channel_ready
- // until we have persisted our monitor.
- chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
- channel_ready = None; // Don't send the channel_ready now
- },
- }
+ match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ ChannelMonitorUpdateStatus::PermanentFailure => {
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ // We do not propagate the monitor update to the user as it would be for a monitor
+ // that we didn't manage to store (and that we don't care about - we don't respond
+ // with the funding_signed so the channel can never go on chain).
+ let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
+ assert!(failed_htlcs.is_empty());
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
+ },
+ ChannelMonitorUpdateStatus::InProgress => {
+ // There's no problem signing a counterparty's funding transaction if our monitor
+ // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+ // accepted payment from yet. We do, however, need to wait to send our channel_ready
+ // until we have persisted our monitor.
+ chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+ channel_ready = None; // Don't send the channel_ready now
+ },
}
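// Stub-typed sketch of the funding-time decision above (not LDK's real types):
// PermanentFailure before the channel can go on chain simply abandons it, while
// InProgress only delays sending channel_ready until the monitor write lands.
#[derive(Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress, PermanentFailure }

#[derive(Debug, PartialEq)]
struct FundingOutcome { abandon_channel: bool, send_channel_ready_now: bool }

fn on_initial_monitor_persist(status: UpdateStatus) -> FundingOutcome {
    match status {
        UpdateStatus::Completed =>
            FundingOutcome { abandon_channel: false, send_channel_ready_now: true },
        // Signing the counterparty's funding transaction before our monitor is persisted is
        // safe: we have accepted no payments yet, we just hold back channel_ready until
        // persistence completes.
        UpdateStatus::InProgress =>
            FundingOutcome { abandon_channel: false, send_channel_ready_now: false },
        // The monitor was never stored, so the channel is given up on entirely (it can never
        // go on chain without our funding_signed).
        UpdateStatus::PermanentFailure =>
            FundingOutcome { abandon_channel: true, send_channel_ready_now: false },
    }
}

fn main() {
    assert_eq!(on_initial_monitor_persist(UpdateStatus::InProgress),
        FundingOutcome { abandon_channel: false, send_channel_ready_now: false });
    assert!(on_initial_monitor_persist(UpdateStatus::PermanentFailure).abandon_channel);
}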
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
Ok(update) => update,
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
- if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
+ match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
}
- }
- return res
+ return res
+ },
}
if let Some(msg) = channel_ready {
send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
- if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
- let (result, is_permanent) =
- handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, channel_state, chan_entry);
- break result;
- }
+ let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+ let (result, is_permanent) =
+ handle_monitor_err!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ if is_permanent {
+ remove_channel!(self, channel_state, chan_entry);
+ break result;
}
}
},
Ok(res) => res
};
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
+ let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+ if let Err(e) = handle_monitor_err!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+ return Err(e);
}
+
channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id.clone(),
msg: revoke_and_ack,
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+ let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
let raa_updates = break_chan_entry!(self,
chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
- if was_frozen_for_monitor {
- assert!(raa_updates.commitment_update.is_none());
- assert!(raa_updates.accepted_htlcs.is_empty());
- assert!(raa_updates.failed_htlcs.is_empty());
- assert!(raa_updates.finalized_claimed_htlcs.is_empty());
- break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
- } else {
- if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
- RAACommitmentOrder::CommitmentFirst, false,
- raa_updates.commitment_update.is_some(), false,
- raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs) {
- break Err(e);
- } else { unreachable!(); }
- }
+ let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
+ if was_paused_for_mon_update {
+ assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+ assert!(raa_updates.commitment_update.is_none());
+ assert!(raa_updates.accepted_htlcs.is_empty());
+ assert!(raa_updates.failed_htlcs.is_empty());
+ assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+ break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+ }
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ if let Err(e) = handle_monitor_err!(self, update_res, channel_state, chan,
+ RAACommitmentOrder::CommitmentFirst, false,
+ raa_updates.commitment_update.is_some(), false,
+ raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ raa_updates.finalized_claimed_htlcs) {
+ break Err(e);
+ } else { unreachable!(); }
}
if let Some(updates) = raa_updates.commitment_update {
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
});
}
},
- MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+ MonitorEvent::Completed { funding_txo, monitor_update_id } => {
self.channel_monitor_updated(&funding_txo, monitor_update_id);
},
}
));
}
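// Self-contained sketch of the asynchronous completion flow that MonitorEvent::Completed
// above drives (stub types, not LDK's API): a persister that cannot finish synchronously
// returns InProgress, and once the background write lands it surfaces the update id, which
// the manager consumes to unblock the paused channel.
#[derive(Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress }

struct AsyncPersister { in_flight: Vec<u64> }

impl AsyncPersister {
    fn persist_update(&mut self, update_id: u64) -> UpdateStatus {
        // Hand the write off to a background task and report it as pending.
        self.in_flight.push(update_id);
        UpdateStatus::InProgress
    }
    // Called when the background write finishes; the returned id plays the role of
    // `monitor_update_id` in a Completed event.
    fn write_finished(&mut self) -> Option<u64> {
        self.in_flight.pop()
    }
}

fn main() {
    let mut persister = AsyncPersister { in_flight: Vec::new() };
    assert_eq!(persister.persist_update(7), UpdateStatus::InProgress);
    // Later, completion of the write lets the manager resume update id 7.
    assert_eq!(persister.write_finished(), Some(7));
}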
if let Some((commitment_update, monitor_update)) = commitment_opt {
- if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
- has_monitor_update = true;
- let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
- handle_errors.push((chan.get_counterparty_node_id(), res));
- if close_channel { return false; }
- } else {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: commitment_update,
- });
+ match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: commitment_update,
+ });
+ },
+ e => {
+ has_monitor_update = true;
+ let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+ handle_errors.push((chan.get_counterparty_node_id(), res));
+ if close_channel { return false; }
+ },
}
}
true
//! A bunch of useful utilities for building networks of nodes and exchanging messages between
//! nodes for functional tests.
-use chain::{BestBlock, Confirm, Listen, Watch, keysinterface::KeysInterface};
+use chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, keysinterface::KeysInterface};
use chain::channelmonitor::ChannelMonitor;
use chain::transaction::OutPoint;
use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
for deserialized_monitor in deserialized_monitors.drain(..) {
- if let Err(_) = chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
+ if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != ChannelMonitorUpdateStatus::Completed {
panic!();
}
}
//! claim outputs on-chain.
use chain;
-use chain::{Confirm, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use chain::chaininterface::LowerBoundedFeeEstimator;
use chain::channelmonitor;
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
assert_eq!(nodes[3].node.list_channels().len(), 0);
assert_eq!(nodes[4].node.list_channels().len(), 0);
- nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon).unwrap();
+ assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
+ ChannelMonitorUpdateStatus::Completed);
check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
}
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[0].node = &nodes_0_deserialized;
check_added_monitors!(nodes[0], 1);
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[0].node = &nodes_0_deserialized;
assert_eq!(nodes[0].node.list_channels().len(), 1);
check_added_monitors!(nodes[0], 1);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[0].node = &nodes_0_deserialized;
// After deserializing, make sure the funding_transaction is still held by the channel manager
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[0].node = &nodes_0_deserialized;
check_added_monitors!(nodes[0], 1);
}
for monitor in node_0_monitors.drain(..) {
- assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+ ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
}).unwrap().1
};
nodes[0].node = &node_state_0;
- assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+ assert_eq!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[0].chain_monitor = &monitor;
nodes[0].chain_source = &chain_source;
fn test_update_err_monitor_lockdown() {
// Our monitor will lock updates of the local commitment transaction if a broadcast condition
// has been fulfilled (either force-close from Channel or block height requiring a HTLC-
- // timeout). Trying to update monitor after lockdown should return a ChannelMonitorUpdateErr.
+ // timeout). Trying to update the monitor after lockdown should return
+ // ChannelMonitorUpdateStatus::PermanentFailure.
//
// This scenario may happen in a watchtower setup, where the watchtower processes a block height
// triggering a timeout while a slow-block-processing ChannelManager receives a local signed
&mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
- assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+ assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
- if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+ assert_eq!(watchtower.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
} else { assert!(false); };
// Our local monitor is in-sync and hasn't yet processed the timeout
&mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
- assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+ assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
&mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
- assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
+ assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Watchtower Alice should already have seen the block and reject the update
- if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
- if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
- if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+ assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::PermanentFailure);
+ assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()), ChannelMonitorUpdateStatus::Completed);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
} else { assert!(false); };
// Our local monitor is in-sync and hasn't yet processed the timeout
nodes_1_deserialized = nodes_1_deserialized_tmp;
assert!(nodes_1_read.is_empty());
- assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
- assert!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor).is_ok());
+ assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
+ assert_eq!(nodes[1].chain_monitor.watch_channel(chan_1_monitor.get_funding_txo().0, chan_1_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[1].node = &nodes_1_deserialized;
check_added_monitors!(nodes[1], 2);
for monitor in monitors {
// On startup the preimage should have been copied into the non-persisted monitor:
assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
- nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+ assert_eq!(nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor),
+ ChannelMonitorUpdateStatus::Completed);
}
check_added_monitors!(nodes[3], 2);
//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.
-use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
use chain::transaction::OutPoint;
use chain::keysinterface::KeysInterface;
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
nodes[0].node = &nodes_0_deserialized;
check_added_monitors!(nodes[0], 1);
$chan_manager = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
if !chan_1_monitor_serialized.0.is_empty() {
let funding_txo = chan_1_monitor.as_ref().unwrap().get_funding_txo().0;
- assert!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()),
+ ChannelMonitorUpdateStatus::Completed);
}
nodes[0].node = &$chan_manager;
check_added_monitors!(nodes[0], if !chan_1_monitor_serialized.0.is_empty() { 2 } else { 1 });
}
// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
- // returning TemporaryFailure. This should cause the claim event to never make its way to the
+ // returning InProgress. This should cause the claim event to never make its way to the
// ChannelManager.
chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
if payment_timeout {
connect_blocks(&nodes[0], 1);
// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
// payment sent event.
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
for update in mon_updates {
};
nodes_0_deserialized = nodes_0_deserialized_tmp;
- assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
nodes[0].node = &nodes_0_deserialized;
};
nodes_1_deserialized = nodes_1_deserialized_tmp;
- assert!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert_eq!(nodes[1].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[1], 1);
nodes[1].node = &nodes_1_deserialized;
//! other behavior that exists only on private channels or with a semi-trusted counterparty (eg
//! LSP).
-use chain::{ChannelMonitorUpdateErr, Watch};
+use chain::{ChannelMonitorUpdateStatus, Watch};
use chain::channelmonitor::ChannelMonitor;
use chain::keysinterface::{Recipient, KeysInterface};
use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
assert!(nodes_1_read.is_empty());
nodes_1_deserialized = nodes_1_deserialized_tmp;
- assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
- assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
+ assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a),
+ ChannelMonitorUpdateStatus::Completed);
+ assert_eq!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b),
+ ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[1], 2);
nodes[1].node = &nodes_1_deserialized;
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(bs_signed_locked.len(), 2);
- chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
match &bs_signed_locked[0] {
MessageSendEvent::SendFundingSigned { node_id, msg } => {
_ => panic!("Unexpected event"),
};
- chanmon_cfgs[0].persister.set_update_ret(Ok(()));
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_channel_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_channel_update);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
check_added_monitors!(nodes[0], 1);
- chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
check_added_monitors!(nodes[1], 0);
use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use chain::transaction::OutPoint;
-use chain::{Confirm, Watch};
+use chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
use ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs};
use ln::msgs::ChannelMessageHandler;
use util::enforcing_trait_impls::EnforcingSigner;
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
}
- nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+ assert_eq!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor),
+ ChannelMonitorUpdateStatus::Completed);
check_added_monitors!(nodes[0], 1);
}
/// A human-readable error message
err: String
},
- /// An attempt to call watch/update_channel returned an Err (ie you did this!), causing the
- /// attempted action to fail.
+ /// An attempt to call watch/update_channel returned a
+ /// [`ChannelMonitorUpdateStatus::InProgress`] indicating the persistence of a monitor update
+ /// is awaiting async resolution. Once it resolves the attempted action should complete
+ /// automatically.
+ ///
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
MonitorUpdateFailed,
/// [`KeysInterface::get_shutdown_scriptpubkey`] returned a shutdown scriptpubkey incompatible
/// with the channel counterparty as negotiated in [`InitFeatures`].
// A PermanentFailure implies we need to shut down since we're force-closing channels without
// even broadcasting!
- fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
- self.persist(&key, monitor)
- .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
+ match self.persist(&key, monitor) {
+ Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+ Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
+ }
}
- fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
- self.persist(&key, monitor)
- .map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
+ match self.persist(&key, monitor) {
+ Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+ Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
+ }
}
}
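// Sketch of the same Result-to-status mapping for a persister that defers writes (stub
// types and a hypothetical background queue, not the real Persist trait): a write that is
// merely queued reports InProgress instead of PermanentFailure, so the channel is paused
// rather than force-closed.
use std::io;

#[derive(Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress, PermanentFailure }

fn status_from_write(res: io::Result<()>, queued_for_background_retry: bool) -> UpdateStatus {
    match res {
        Ok(()) => UpdateStatus::Completed,
        // The write was handed to a background task, so the update is pending, not lost.
        Err(_) if queued_for_background_retry => UpdateStatus::InProgress,
        // Nothing was stored and nothing will be: give up on the channel.
        Err(_) => UpdateStatus::PermanentFailure,
    }
}

fn main() {
    assert_eq!(status_from_write(Ok(()), false), UpdateStatus::Completed);
    let err = io::Error::new(io::ErrorKind::Other, "disk full");
    assert_eq!(status_from_write(Err(err), true), UpdateStatus::InProgress);
}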
}
}
impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
- fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and disk...
let mut w = TestVecWriter(Vec::new());
self.chain_monitor.watch_channel(funding_txo, new_monitor)
}
- fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
// Every monitor update should survive roundtrip
let mut w = TestVecWriter(Vec::new());
update.write(&mut w).unwrap();
}
pub struct TestPersister {
- pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
+ pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
/// If this is set to Some(), after the next return, we'll always return this until update_ret
/// is changed:
- pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
+ pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
/// MonitorUpdateId here.
pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
impl TestPersister {
pub fn new() -> Self {
Self {
- update_ret: Mutex::new(Ok(())),
+ update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
next_update_ret: Mutex::new(None),
chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
offchain_monitor_updates: Mutex::new(HashMap::new()),
}
}
- pub fn set_update_ret(&self, ret: Result<(), chain::ChannelMonitorUpdateErr>) {
+ pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
*self.update_ret.lock().unwrap() = ret;
}
- pub fn set_next_update_ret(&self, next_ret: Option<Result<(), chain::ChannelMonitorUpdateErr>>) {
+ pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
*self.next_update_ret.lock().unwrap() = next_ret;
}
}
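// Self-contained sketch of how update_ret and next_update_ret interact (stub status type,
// not the real TestPersister): the current value is returned, and if next_update_ret is set
// it becomes the new current value for later calls.
use std::sync::Mutex;

#[derive(Clone, Copy, Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress }

struct StubPersister {
    update_ret: Mutex<UpdateStatus>,
    next_update_ret: Mutex<Option<UpdateStatus>>,
}

impl StubPersister {
    fn persist(&self) -> UpdateStatus {
        let ret = *self.update_ret.lock().unwrap();
        if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
            *self.update_ret.lock().unwrap() = next_ret;
        }
        ret
    }
}

fn main() {
    let persister = StubPersister {
        update_ret: Mutex::new(UpdateStatus::Completed),
        next_update_ret: Mutex::new(Some(UpdateStatus::InProgress)),
    };
    // First call returns the current value, then installs next_update_ret.
    assert_eq!(persister.persist(), UpdateStatus::Completed);
    // Subsequent calls keep returning InProgress until update_ret is changed again.
    assert_eq!(persister.persist(), UpdateStatus::InProgress);
    assert_eq!(persister.persist(), UpdateStatus::InProgress);
}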
impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
- fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let ret = self.update_ret.lock().unwrap().clone();
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
*self.update_ret.lock().unwrap() = next_ret;
ret
}
- fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let ret = self.update_ret.lock().unwrap().clone();
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
*self.update_ret.lock().unwrap() = next_ret;