From: Jeffrey Czyz Date: Tue, 21 Jul 2020 00:03:52 +0000 (-0700) Subject: Replace ManyChannelMonitor with chain::Watch X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=449968bdde5ec960b306cc605ab76594dddf536a;p=rust-lightning Replace ManyChannelMonitor with chain::Watch Rename ManyChannelMonitor to chain::Watch and move to chain/mod.rs, where chain-related interfaces live. Update the documentation for clarity and to conform to rustdoc formatting. --- diff --git a/ARCH.md b/ARCH.md index 061c0ba4f..1d987a353 100644 --- a/ARCH.md +++ b/ARCH.md @@ -6,7 +6,7 @@ need to use are `ChannelManager` and `ChannelMonitor`. `ChannelManager` holds mu channels, routes payments between them, and exposes a simple API to make and receive payments. Individual `ChannelMonitor`s monitor the on-chain state of a channel, punish counterparties if they misbehave, and force-close channels if they contain unresolved -HTLCs which are near expiration. The `ManyChannelMonitor` API provides a way for you to +HTLCs which are near expiration. The `chain::Watch` interface provides a way for you to receive `ChannelMonitorUpdate`s from `ChannelManager` and persist them to disk before the channel steps forward. @@ -45,9 +45,9 @@ At a high level, some of the common interfaces fit together as follows: | \ | | / ---------> | BroadcasterInterface | | \ | | / / | ------------------------ | \ v v v / v ^ - | (as ------------------ ---------------------- - | ChannelMessageHandler)-> | ChannelManager | ----> | ManyChannelMonitor | - v / ------------------ ---------------------- + | (as ------------------ ---------------- + | ChannelMessageHandler)-> | ChannelManager | ----> | chain::Watch | + v / ------------------ ---------------- --------------- / (as EventsProvider) | PeerManager |- \ / --------------- \ / diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 2c664369e..e97ae805f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -20,6 +20,7 @@ use bitcoin::hashes::Hash as TraitImport; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hash_types::{BlockHash, WPubkeyHash}; +use lightning::chain; use lightning::chain::transaction::OutPoint; use lightning::chain::chaininterface::{BroadcasterInterface, ChainListener, ConfirmationTarget, FeeEstimator}; use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys}; @@ -95,21 +96,21 @@ impl TestChannelMonitor { } } } -impl channelmonitor::ManyChannelMonitor for TestChannelMonitor { +impl chain::Watch for TestChannelMonitor { type Keys = EnforcingChannelKeys; - fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { let mut ser = VecWriter(Vec::new()); monitor.write_for_disk(&mut ser).unwrap(); if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) { - panic!("Already had monitor pre-add_monitor"); + panic!("Already had monitor pre-watch_channel"); } self.should_update_manager.store(true, atomic::Ordering::Relaxed); - assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok()); + assert!(self.simple_monitor.watch_channel(funding_txo, monitor).is_ok()); self.update_ret.lock().unwrap().clone() } - fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> 
Result<(), channelmonitor::ChannelMonitorUpdateErr> { + fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { let mut map_lock = self.latest_monitors.lock().unwrap(); let mut map_entry = match map_lock.entry(funding_txo) { hash_map::Entry::Occupied(entry) => entry, @@ -125,8 +126,8 @@ impl channelmonitor::ManyChannelMonitor for TestChannelMonitor { self.update_ret.lock().unwrap().clone() } - fn get_and_clear_pending_htlcs_updated(&self) -> Vec { - return self.simple_monitor.get_and_clear_pending_htlcs_updated(); + fn release_pending_htlc_updates(&self) -> Vec { + return self.simple_monitor.release_pending_htlc_updates(); } } @@ -204,7 +205,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! reload_node { ($ser: expr, $node_id: expr, $old_monitors: expr) => { { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone())); + let chain_monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone())); let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) }); let mut config = UserConfig::default(); @@ -216,7 +217,7 @@ pub fn do_test(data: &[u8], out: Out) { let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap(); for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() { monitors.insert(outpoint, <(BlockHash, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser)).expect("Failed to read monitor").1); - monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser)); + chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser)); } let mut monitor_refs = HashMap::new(); for (outpoint, monitor) in monitors.iter_mut() { @@ -226,14 +227,14 @@ pub fn do_test(data: &[u8], out: Out) { let read_args = ChannelManagerReadArgs { keys_manager, fee_estimator: fee_est.clone(), - monitor: monitor.clone(), + chain_monitor: chain_monitor.clone(), tx_broadcaster: broadcast.clone(), logger, default_config: config, channel_monitors: &mut monitor_refs, }; - (<(BlockHash, ChannelManager, Arc, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor) + (<(BlockHash, ChannelManager, Arc, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor) } } } diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 5c79853ce..0a63a4c5a 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -4,7 +4,9 @@ use bitcoin::blockdata::script::Script; use bitcoin::blockdata::transaction::TxOut; use bitcoin::hash_types::{BlockHash, Txid}; +use chain::keysinterface::ChannelKeys; use chain::transaction::OutPoint; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, HTLCUpdate}; pub mod chaininterface; pub mod transaction; @@ -32,6 +34,51 @@ pub enum AccessError { UnknownTx, } +/// The `Watch` trait defines behavior for watching on-chain activity pertaining to channels as +/// blocks are connected and disconnected. +/// +/// Each channel is associated with a [`ChannelMonitor`]. Implementations are responsible for +/// maintaining a set of monitors and updating them accordingly as channel state changes and HTLCs +/// are resolved on chain. 
See method documentation for specific requirements. +/// +/// TODO: Add documentation about persisting monitors. +/// +/// If an implementation maintains multiple instances of a channel's monitor (e.g., by using a +/// watchtower), then it must ensure that updates are applied across all instances. Otherwise, it +/// could result in a revoked transaction being broadcast, allowing the counterparty to claim all +/// funds in the channel. +/// +/// [`ChannelMonitor`]: ../ln/channelmonitor/struct.ChannelMonitor.html +pub trait Watch: Send + Sync { + /// Keys needed by monitors for creating and signing transactions. + type Keys: ChannelKeys; + + /// Watches a channel identified by `funding_txo` using `monitor`. + /// + /// Implementations are responsible for watching the chain for the funding transaction along + /// with spends of its output and any outputs returned by [`get_outputs_to_watch`]. In practice, + /// this means calling [`block_connected`] and [`block_disconnected`] on the monitor and + /// including all such transactions that meet this criteria. + /// + /// [`get_outputs_to_watch`]: ../ln/channelmonitor/struct.ChannelMonitor.html#method.get_outputs_to_watch + /// [`block_connected`]: ../ln/channelmonitor/struct.ChannelMonitor.html#method.block_connected + /// [`block_disconnected`]: ../ln/channelmonitor/struct.ChannelMonitor.html#method.block_disconnected + fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>; + + /// Updates a channel identified by `funding_txo` by applying `update` to its monitor. + /// + /// Implementations must call [`update_monitor`] with the given update. See + /// [`ChannelMonitorUpdateErr`] for invariants around returning an error. + /// + /// [`update_monitor`]: ../ln/channelmonitor/struct.ChannelMonitor.html#method.update_monitor + /// [`ChannelMonitorUpdateErr`]: ../ln/channelmonitor/enum.ChannelMonitorUpdateErr.html + fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>; + + /// Returns any HTLCs resolved on chain since the last call. Subsequent calls must only return + /// newly resolved HTLCs. + fn release_pending_htlc_updates(&self) -> Vec; +} + /// An interface for providing [`WatchEvent`]s. /// /// [`WatchEvent`]: enum.WatchEvent.html diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 014273b5c..006a71284 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -26,10 +26,12 @@ use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1; +use chain; +use chain::Watch; use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; use ln::msgs; @@ -347,7 +349,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage /// /// Note that you can be a bit lazier about writing out ChannelManager than you can be with /// ChannelMonitors. 
With ChannelMonitors you MUST write each monitor update out to disk before -/// returning from ManyChannelMonitor::add_/update_monitor, with ChannelManagers, writing updates +/// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates /// happens out-of-band (and will prevent any other ChannelManager operations from occurring during /// the serialization process). If the deserialized version is out-of-date compared to the /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the @@ -371,7 +373,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when /// you're using lightning-net-tokio. pub struct ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -380,7 +382,7 @@ pub struct ChannelManager ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -708,14 +710,14 @@ impl /// the ChannelManager as a listener to the BlockNotifier and call the BlockNotifier's /// `block_(dis)connected` methods, which will notify all registered listeners in one /// go. - pub fn new(network: Network, fee_est: F, monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self { + pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self { let secp_ctx = Secp256k1::new(); ChannelManager { default_configuration: config.clone(), genesis_hash: genesis_block(network).header.bitcoin_hash(), fee_estimator: fee_est, - monitor, + chain_monitor, tx_broadcaster, latest_block_height: AtomicUsize::new(current_blockchain_height), @@ -890,7 +892,7 @@ impl // force-closing. The monitor update on the required in-memory copy should broadcast // the latest local state, which is the best we can do anyway. Thus, it is safe to // ignore the result here. 
- let _ = self.monitor.update_monitor(funding_txo, monitor_update); + let _ = self.chain_monitor.update_channel(funding_txo, monitor_update); } } @@ -1269,7 +1271,7 @@ impl }, onion_packet, &self.logger), channel_state, chan) } { Some((update_add, commitment_signed, monitor_update)) => { - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true); // Note that MonitorUpdateFailed here indicates (per function docs) // that we will resend the commitment update once monitor updating @@ -1665,7 +1667,7 @@ impl continue; } }; - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; } @@ -2037,7 +2039,7 @@ impl match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok((msgs, monitor_option)) => { if let Some(monitor_update) = monitor_option { - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { assert!(msgs.is_none()); } else { @@ -2121,7 +2123,7 @@ impl /// exists largely only to prevent races between this and concurrent update_monitor calls. /// /// Thus, the anticipated use is, at a high level: - /// 1) You register a ManyChannelMonitor with this ChannelManager, + /// 1) You register a chain::Watch with this ChannelManager, /// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of /// said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures /// any time it cannot do so instantly, @@ -2262,7 +2264,7 @@ impl } fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let ((funding_msg, monitor_update), mut chan) = { + let ((funding_msg, monitor), mut chan) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { @@ -2276,8 +2278,8 @@ impl } }; // Because we have exclusive ownership of the channel here we can release the channel_state - // lock before add_monitor - if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().0, monitor_update) { + // lock before watch_channel + if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { // Note that we reply with the new channel_id in error messages if we gave up on the @@ -2325,7 +2327,7 @@ impl Ok(update) => update, Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), }; - if let Err(e) = self.monitor.add_monitor(chan.get().get_funding_txo().unwrap(), monitor) { + if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); } 
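The call sites above now reach the monitor through chain::Watch::watch_channel and update_channel; the trait contract is that the monitor or update is made durable before Ok(()) is returned. A minimal illustrative sketch (not from this commit) of one way to satisfy that by wrapping an existing chain::Watch such as SimpleManyChannelMonitor; PersistingWatch and the Persist trait are invented names, and the generic parameters are assumptions here since this plain-text rendering strips them:

use lightning::chain;
use lightning::chain::keysinterface::ChannelKeys;
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, HTLCUpdate};

/// Hypothetical persistence hook; a real backend might write to disk or push to a watchtower.
pub trait Persist<Keys: ChannelKeys>: Send + Sync {
	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
	fn persist_update(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
}

/// Wraps an inner chain::Watch and persists every monitor and update before letting the
/// channel move forward.
pub struct PersistingWatch<W: chain::Watch, P: Persist<W::Keys>> {
	inner: W,
	persister: P,
}

impl<W: chain::Watch, P: Persist<W::Keys>> chain::Watch for PersistingWatch<W, P> {
	type Keys = W::Keys;

	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr> {
		// Persist the full monitor first: returning Ok(()) promises it is durable.
		self.persister.persist_new_channel(funding_txo, &monitor)?;
		self.inner.watch_channel(funding_txo, monitor)
	}

	fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
		// Persist the delta before applying it to the in-memory copy.
		self.persister.persist_update(funding_txo, &update)?;
		self.inner.update_channel(funding_txo, update)
	}

	fn release_pending_htlc_updates(&self) -> Vec<HTLCUpdate> {
		self.inner.release_pending_htlc_updates()
	}
}

The fuzz and test harnesses in this commit use the same shape in TestChannelMonitor, which additionally round-trips the monitor through write_for_disk to check serialization.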
(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id()) @@ -2595,13 +2597,13 @@ impl Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); - let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), update); + let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); try_chan_entry!(self, Err(e), channel_state, chan); unreachable!(); }, Ok(res) => res }; - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); //TODO: Rebroadcast closing_signed if present on monitor update restoration } @@ -2681,7 +2683,7 @@ impl let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan); - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); @@ -2795,7 +2797,7 @@ impl let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) = try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan); if let Some(monitor_update) = monitor_update_opt { - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { // channel_reestablish doesn't guarantee the order it returns is sensical // for the messages it returns, but if we're setting what messages to // re-transmit on monitor update success, we need to make sure it is sane. @@ -2881,7 +2883,7 @@ impl if let Some((update_fee, commitment_signed, monitor_update)) = break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan) { - if let Err(_e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { unimplemented!(); } channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { @@ -2909,7 +2911,7 @@ impl } impl events::MessageSendEventsProvider for ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -2921,7 +2923,7 @@ impl // restart. This is doubly true for the fail/fulfill-backs from monitor events! { //TODO: This behavior should be documented. 
- for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() { + for htlc_update in self.chain_monitor.release_pending_htlc_updates() { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); @@ -2940,7 +2942,7 @@ impl } impl events::EventsProvider for ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -2952,7 +2954,7 @@ impl // restart. This is doubly true for the fail/fulfill-backs from monitor events! { //TODO: This behavior should be documented. - for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() { + for htlc_update in self.chain_monitor.release_pending_htlc_updates() { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); @@ -2972,7 +2974,7 @@ impl impl ChainListener for ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -3140,7 +3142,7 @@ impl ChannelMessageHandler for ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -3579,7 +3581,7 @@ impl Readable for HTLCForwardInfo { } impl Writeable for ChannelManager - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -3659,10 +3661,10 @@ impl - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -3677,12 +3679,12 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: /// /// No calls to the FeeEstimator will be made during deserialization. pub fee_estimator: F, - /// The ManyChannelMonitor for use in the ChannelManager in the future. + /// The chain::Watch for use in the ChannelManager in the future. /// - /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that + /// No calls to the chain::Watch will be made during deserialization. It is assumed that /// you have deserialized ChannelMonitors separately and will add them to your - /// ManyChannelMonitor after deserializing this ChannelManager. - pub monitor: M, + /// chain::Watch after deserializing this ChannelManager. 
+ pub chain_monitor: M, /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be /// used to broadcast the latest local commitment transactions of channels which must be @@ -3712,7 +3714,7 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: // SipmleArcChannelManager type: impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ReadableArgs> for (BlockHash, Arc>) - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -3726,7 +3728,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ReadableArgs> for (BlockHash, ChannelManager) - where M::Target: ManyChannelMonitor, + where M::Target: chain::Watch, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, @@ -3838,7 +3840,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, - monitor: args.monitor, + chain_monitor: args.chain_monitor, tx_broadcaster: args.tx_broadcaster, latest_block_height: AtomicUsize::new(latest_block_height as usize), diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs index 63948fd1c..a8cf5177f 100644 --- a/lightning/src/ln/channelmonitor.rs +++ b/lightning/src/ln/channelmonitor.rs @@ -3,13 +3,15 @@ //! //! ChannelMonitor objects are generated by ChannelManager in response to relevant //! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can -//! be made in responding to certain messages, see ManyChannelMonitor for more. +//! be made in responding to certain messages, see [`chain::Watch`] for more. //! //! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the //! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date //! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other //! security-domain-separated system design, you should consider having multiple paths for //! ChannelMonitors to get out of the HSM and onto monitoring devices. +//! +//! [`chain::Watch`]: ../../chain/trait.Watch.html use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::{TxOut,Transaction}; @@ -140,8 +142,10 @@ pub enum ChannelMonitorUpdateErr { #[derive(Debug)] pub struct MonitorUpdateError(pub &'static str); -/// Simple structure send back by ManyChannelMonitor in case of HTLC detected onchain from a +/// Simple structure send back by `chain::Watch` in case of HTLC detected onchain from a /// forward channel and from which info are needed to update HTLC in a backward channel. +/// +/// [`chain::Watch`]: ../../chain/trait.Watch.html #[derive(Clone, PartialEq)] pub struct HTLCUpdate { pub(super) payment_hash: PaymentHash, @@ -150,7 +154,7 @@ pub struct HTLCUpdate { } impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source }); -/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a +/// A simple implementation of a [`chain::Watch`] and ChainListener. Can be used to create a /// watchtower or watch our own channels. /// /// Note that you must provide your own key by which to refer to channels. 
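As the ChannelManagerReadArgs documentation above notes, the chain::Watch passed as chain_monitor is not called during deserialization; the ChannelMonitors read separately must be handed back to it afterwards via watch_channel, as the updated tests do. A small illustrative helper (the function name is invented; only get_funding_txo and watch_channel from this diff are assumed):

use lightning::chain::Watch;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr};

/// Re-registers monitors that were deserialized separately from the ChannelManager.
fn rewatch_monitors<W: Watch>(
	watcher: &W,
	monitors: Vec<ChannelMonitor<W::Keys>>,
) -> Result<(), ChannelMonitorUpdateErr> {
	for monitor in monitors {
		// get_funding_txo() yields the funding OutPoint (plus its script); the outpoint
		// keys the monitor for the watcher.
		let funding_txo = monitor.get_funding_txo().0;
		watcher.watch_channel(funding_txo, monitor)?;
	}
	Ok(())
}

This mirrors the loops in functional_test_utils.rs and functional_tests.rs below, which assert that each watch_channel call returns Ok.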
@@ -160,7 +164,9 @@ impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source }); /// index by a PublicKey which is required to sign any updates. /// /// If you're using this for local monitoring of your own channels, you probably want to use -/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation. +/// `OutPoint` as the key, which will give you a [`chain::Watch`] implementation. +/// +/// [`chain::Watch`]: ../../chain/trait.Watch.html pub struct SimpleManyChannelMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -304,28 +310,28 @@ impl ManyChannelMonitor for SimpleManyChannelMonitor +impl chain::Watch for SimpleManyChannelMonitor where T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { type Keys = ChanSigner; - fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { + fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { match self.add_monitor_by_key(funding_txo, monitor) { Ok(_) => Ok(()), Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), } } - fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> { + fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> { match self.update_monitor_by_key(funding_txo, update) { Ok(_) => Ok(()), Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), } } - fn get_and_clear_pending_htlcs_updated(&self) -> Vec { + fn release_pending_htlc_updates(&self) -> Vec { let mut pending_htlcs_updated = Vec::new(); for chan in self.monitors.lock().unwrap().values_mut() { pending_htlcs_updated.append(&mut chan.get_and_clear_pending_htlcs_updated()); @@ -864,52 +870,6 @@ pub struct ChannelMonitor { secp_ctx: Secp256k1, //TODO: dedup this a bit... } -/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between -/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing -/// events to it, while also taking any add/update_monitor events and passing them to some remote -/// server(s). -/// -/// In general, you must always have at least one local copy in memory, which must never fail to -/// update (as it is responsible for broadcasting the latest state in case the channel is closed), -/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails -/// to update (eg out-of-memory or some other condition), you must immediately shut down without -/// taking any further action such as writing the current state to disk. This should likely be -/// accomplished via panic!() or abort(). -/// -/// Note that any updates to a channel's monitor *must* be applied to each instance of the -/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If -/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions -/// which we have revoked, allowing our counterparty to claim all funds in the channel! -/// -/// User needs to notify implementors of ManyChannelMonitor when a new block is connected or -/// disconnected using their `block_connected` and `block_disconnected` methods. 
However, rather -/// than calling these methods directly, the user should register implementors as listeners to the -/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify -/// all registered listeners in one go. -pub trait ManyChannelMonitor: Send + Sync { - /// The concrete type which signs for transactions and provides access to our channel public - /// keys. - type Keys: ChannelKeys; - - /// Adds a monitor for the given `funding_txo`. - /// - /// Implementations must ensure that `monitor` receives block_connected calls for blocks with - /// the funding transaction or any spends of it, as well as any spends of outputs returned by - /// get_outputs_to_watch. Not doing so may result in LOST FUNDS. - fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>; - - /// Updates a monitor for the given `funding_txo`. - fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>; - - /// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated - /// with success or failure. - /// - /// You should probably just call through to - /// ChannelMonitor::get_and_clear_pending_htlcs_updated() for each ChannelMonitor and return - /// the full list. - fn get_and_clear_pending_htlcs_updated(&self) -> Vec; -} - #[cfg(any(test, feature = "fuzztarget"))] /// Used only in testing and fuzztarget to check serialization roundtrips don't change the /// underlying object @@ -1467,7 +1427,9 @@ impl ChannelMonitor { } /// Get the list of HTLCs who's status has been updated on chain. This should be called by - /// ChannelManager via ManyChannelMonitor::get_and_clear_pending_htlcs_updated(). + /// ChannelManager via [`chain::Watch::release_pending_htlc_updates`]. + /// + /// [`chain::Watch::release_pending_htlc_updates`]: ../../chain/trait.Watch.html#tymethod.release_pending_htlc_updates pub fn get_and_clear_pending_htlcs_updated(&mut self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut ret, &mut self.pending_htlcs_updated); @@ -1477,7 +1439,7 @@ impl ChannelMonitor { /// Gets the list of pending events which were generated by previous actions, clearing the list /// in the process. /// - /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to + /// This is called by SimpleManyChannelMonitor::get_and_clear_pending_events() and is equivalent to /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do /// no internal locking in ChannelMonitors. pub fn get_and_clear_pending_events(&mut self) -> Vec { @@ -1896,12 +1858,15 @@ impl ChannelMonitor { Vec::new() } - /// Called by SimpleManyChannelMonitor::block_connected, which implements - /// ChainListener::block_connected. - /// Eventually this should be pub and, roughly, implement ChainListener, however this requires - /// &mut self, as well as returns new spendable outputs and outpoints to watch for spending of - /// on-chain. - fn block_connected(&mut self, header: &BlockHeader, txn_matched: &[(usize, &Transaction)], height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec)> + /// Determines if any HTLCs have been resolved on chain in the connected block. + /// + /// TODO: Include how `broadcaster` and `fee_estimator` are used. + /// + /// Returns any transaction outputs from `txn_matched` that spends of should be watched for. 
+ /// After called these are also available via [`get_outputs_to_watch`]. + /// + /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch + pub fn block_connected(&mut self, header: &BlockHeader, txn_matched: &[(usize, &Transaction)], height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec)> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -2002,7 +1967,11 @@ impl ChannelMonitor { watch_outputs } - fn block_disconnected(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L) + /// Determines if the disconnected block contained any transactions of interest and updates + /// appropriately. + /// + /// TODO: Include how `broadcaster` and `fee_estimator` are used. + pub fn block_disconnected(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c59fdae6e..eb8db82f8 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2,10 +2,11 @@ //! nodes for functional tests. use chain; +use chain::Watch; use chain::chaininterface; use chain::transaction::OutPoint; use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure}; -use ln::channelmonitor::{ChannelMonitor, ManyChannelMonitor}; +use ln::channelmonitor::ChannelMonitor; use routing::router::{Route, get_route}; use routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; use ln::features::InitFeatures; @@ -201,7 +202,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { default_config: UserConfig::default(), keys_manager: self.keys_manager, fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 }, - monitor: self.chan_monitor, + chain_monitor: self.chan_monitor, tx_broadcaster: self.tx_broadcaster.clone(), logger: &test_utils::TestLogger::new(), channel_monitors: &mut channel_monitors, @@ -210,7 +211,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let channel_monitor = test_utils::TestChannelMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest); for deserialized_monitor in deserialized_monitors.drain(..) { - if let Err(_) = channel_monitor.add_monitor(deserialized_monitor.get_funding_txo().0, deserialized_monitor) { + if let Err(_) = channel_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) { panic!(); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 581ca2be5..ec6fcfaf5 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2,12 +2,13 @@ //! payments/messages between them, and often checking the resulting ChannelMonitors are able to //! claim outputs on-chain. 
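block_connected and block_disconnected are made public on ChannelMonitor above, so callers that drive monitors directly rather than through SimpleManyChannelMonitor can feed blocks in themselves. A sketch against the signatures shown in the hunk above; the generic parameters are assumed (this plain-text rendering strips them) and the returned outputs-to-watch list is treated opaquely:

use std::ops::Deref;

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::keysinterface::ChannelKeys;
use lightning::ln::channelmonitor::ChannelMonitor;
use lightning::util::logger::Logger;

/// Feeds one connected block into a single ChannelMonitor and reports how many transactions
/// now have outputs whose spends must also be watched for.
fn feed_connected_block<ChanSigner: ChannelKeys, B: Deref, F: Deref, L: Deref>(
	monitor: &mut ChannelMonitor<ChanSigner>,
	header: &BlockHeader,
	txn_matched: &[(usize, &Transaction)],
	height: u32,
	broadcaster: B,
	fee_estimator: F,
	logger: L,
) -> usize
	where B::Target: BroadcasterInterface,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	// block_connected returns, per relevant txid, the outputs to watch for spends of; the
	// same data is afterwards available via get_outputs_to_watch().
	let watch_outputs = monitor.block_connected(header, txn_matched, height, broadcaster, fee_estimator, logger);
	watch_outputs.len()
}

Each returned entry lists outputs whose future spends must be included in later block_connected calls, per the watch_channel requirements documented in chain/mod.rs above.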
+use chain::Watch; use chain::transaction::OutPoint; use chain::keysinterface::{ChannelKeys, KeysInterface, SpendableOutputDescriptor}; use chain::chaininterface::{ChainListener, BlockNotifier}; use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure, BREAKDOWN_TIMEOUT}; -use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY}; +use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use ln::channelmonitor; use ln::channel::{Channel, ChannelError}; use ln::{chan_utils, onion_utils}; @@ -4290,7 +4291,7 @@ fn test_no_txn_manager_serialize_deserialize() { default_config: config, keys_manager: &keys_manager, fee_estimator: &fee_estimator, - monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors: &mut channel_monitors, @@ -4299,7 +4300,7 @@ fn test_no_txn_manager_serialize_deserialize() { nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); - assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); + assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); nodes[0].node = &nodes_0_deserialized; nodes[0].block_notifier.register_listener(nodes[0].node); assert_eq!(nodes[0].node.list_channels().len(), 1); @@ -4398,7 +4399,7 @@ fn test_manager_serialize_deserialize_events() { default_config: config, keys_manager: &keys_manager, fee_estimator: &fee_estimator, - monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors: &mut channel_monitors, @@ -4409,7 +4410,7 @@ fn test_manager_serialize_deserialize_events() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); + assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); nodes[0].node = &nodes_0_deserialized; // After deserializing, make sure the FundingBroadcastSafe event is still held by the channel manager @@ -4488,7 +4489,7 @@ fn test_simple_manager_serialize_deserialize() { default_config: UserConfig::default(), keys_manager: &keys_manager, fee_estimator: &fee_estimator, - monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors: &mut channel_monitors, @@ -4497,7 +4498,7 @@ fn test_simple_manager_serialize_deserialize() { nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); - assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); + assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); nodes[0].node = &nodes_0_deserialized; check_added_monitors!(nodes[0], 1); @@ -4578,7 +4579,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { default_config: UserConfig::default(), keys_manager: &keys_manager, fee_estimator: &fee_estimator, - monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chan_monitor, tx_broadcaster: 
nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors: &mut node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), @@ -4592,7 +4593,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { default_config: UserConfig::default(), keys_manager: &keys_manager, fee_estimator: &fee_estimator, - monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors: &mut node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), @@ -4608,7 +4609,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { } for monitor in node_0_monitors.drain(..) { - assert!(nodes[0].chan_monitor.add_monitor(monitor.get_funding_txo().0, monitor).is_ok()); + assert!(nodes[0].chan_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok()); check_added_monitors!(nodes[0], 1); } nodes[0].node = &nodes_0_deserialized; @@ -7541,7 +7542,7 @@ fn test_data_loss_protect() { <(BlockHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs { keys_manager: &keys_manager, fee_estimator: &fee_estimator, - monitor: &monitor, + chain_monitor: &monitor, logger: &logger, tx_broadcaster: &tx_broadcaster, default_config: UserConfig::default(), @@ -7549,7 +7550,7 @@ fn test_data_loss_protect() { }).unwrap().1 }; nodes[0].node = &node_state_0; - assert!(monitor.add_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok()); + assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok()); nodes[0].chan_monitor = &monitor; nodes[0].chain_source = &chain_source; @@ -8366,7 +8367,7 @@ fn test_update_err_monitor_lockdown() { &mut ::std::io::Cursor::new(&w.0)).unwrap().1; assert!(new_monitor == *monitor); let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator); - assert!(watchtower.add_monitor(outpoint, new_monitor).is_ok()); + assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok()); watchtower }; let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; @@ -8380,8 +8381,8 @@ fn test_update_err_monitor_lockdown() { nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) { - if let Err(_) = watchtower.simple_monitor.update_monitor(outpoint, update.clone()) {} else { assert!(false); } - if let Ok(_) = nodes[0].chan_monitor.update_monitor(outpoint, update) {} else { assert!(false); } + if let Err(_) = watchtower.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } + if let Ok(_) = nodes[0].chan_monitor.update_channel(outpoint, update) {} else { assert!(false); } } else { assert!(false); } } else { assert!(false); }; // Our local monitor is in-sync and hasn't processed yet timeout diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 3fe2d5e73..067828ecf 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -120,7 +120,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { }; 
connect_block(&nodes[1], &block, CHAN_CONFIRM_DEPTH + 1); - // ChannelManager only polls ManyChannelMonitor::get_and_clear_pending_htlcs_updated when we + // ChannelManager only polls chain::Watch::release_pending_htlc_updates when we // probe it for events, so we probe non-message events here (which should still end up empty): assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0); } else { diff --git a/lightning/src/util/errors.rs b/lightning/src/util/errors.rs index 7119c3a02..868af85d3 100644 --- a/lightning/src/util/errors.rs +++ b/lightning/src/util/errors.rs @@ -33,7 +33,7 @@ pub enum APIError { /// A human-readable error message err: String }, - /// An attempt to call add/update_monitor returned an Err (ie you did this!), causing the + /// An attempt to call watch/update_channel returned an Err (ie you did this!), causing the /// attempted action to fail. MonitorUpdateFailed, } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index dde51afea..f3db5f499 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -71,10 +71,10 @@ impl<'a> TestChannelMonitor<'a> { } } } -impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> { +impl<'a> chain::Watch for TestChannelMonitor<'a> { type Keys = EnforcingChannelKeys; - fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... let mut w = TestVecWriter(Vec::new()); @@ -84,7 +84,7 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> { assert!(new_monitor == monitor); self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id())); self.added_monitors.lock().unwrap().push((funding_txo, monitor)); - assert!(self.simple_monitor.add_monitor(funding_txo, new_monitor).is_ok()); + assert!(self.simple_monitor.watch_channel(funding_txo, new_monitor).is_ok()); let ret = self.update_ret.lock().unwrap().clone(); if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() { @@ -93,7 +93,7 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> { ret } - fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { // Every monitor update should survive roundtrip let mut w = TestVecWriter(Vec::new()); update.write(&mut w).unwrap(); @@ -101,7 +101,7 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> { &mut ::std::io::Cursor::new(&w.0)).unwrap() == update); self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id)); - assert!(self.simple_monitor.update_monitor(funding_txo, update).is_ok()); + assert!(self.simple_monitor.update_channel(funding_txo, update).is_ok()); // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... 
let monitors = self.simple_monitor.monitors.lock().unwrap(); @@ -120,8 +120,8 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> { ret } - fn get_and_clear_pending_htlcs_updated(&self) -> Vec { - return self.simple_monitor.get_and_clear_pending_htlcs_updated(); + fn release_pending_htlc_updates(&self) -> Vec { + return self.simple_monitor.release_pending_htlc_updates(); } }
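A final illustrative sketch, not from this commit, for the multi-instance requirement that the new Watch documentation carries over from the removed ManyChannelMonitor docs and which test_update_err_monitor_lockdown above exercises with a watchtower copy: every copy of a channel's monitor must apply each ChannelMonitorUpdate before update_channel reports success. ReplicatedWatch is an invented name; ChannelMonitorUpdate::clone() is assumed only because update.clone() appears in the tests above.

use lightning::chain;
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, HTLCUpdate};

/// Fans monitor updates out to a primary (in-memory) watcher plus any number of replicas,
/// such as remote watchtowers running their own chain::Watch.
pub struct ReplicatedWatch<W: chain::Watch> {
	primary: W,
	replicas: Vec<W>,
}

impl<W: chain::Watch> chain::Watch for ReplicatedWatch<W> {
	type Keys = W::Keys;

	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr> {
		// ChannelMonitor itself is not cloned here; replicas are assumed to be seeded from a
		// serialized copy out-of-band, as the watchtower test above does with write_for_disk.
		self.primary.watch_channel(funding_txo, monitor)
	}

	fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
		// The in-memory primary must never fall behind: it is what broadcasts the latest
		// state if the channel is force-closed.
		self.primary.update_channel(funding_txo, update.clone())?;
		// Every replica must also see the update before success is reported; a stale replica
		// could otherwise broadcast a revoked commitment and lose funds.
		let mut res = Ok(());
		for replica in self.replicas.iter() {
			if replica.update_channel(funding_txo, update.clone()).is_err() {
				res = Err(ChannelMonitorUpdateErr::TemporaryFailure);
			}
		}
		res
	}

	fn release_pending_htlc_updates(&self) -> Vec<HTLCUpdate> {
		// Only the primary feeds resolved HTLCs back to the ChannelManager.
		self.primary.release_pending_htlc_updates()
	}
}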