----------------- | UserConfig |
-------------------- | --------------
/------| MessageSendEvent | | | ----------------
- | -------------------- | | | FeeEstimator |
- | (as MessageSendEventsProvider) | | ----------------
- | ^ | | / | ------------------------
- | \ | | / ---------> | BroadcasterInterface |
- | \ | | / / | ------------------------
- | \ v v v / v ^
- | (as ------------------ ----------------
- | ChannelMessageHandler)-> | ChannelManager | ----> | chain::Watch |
- v / ------------------ ----------------
---------------- / (as EventsProvider)
-| PeerManager |- \ /
---------------- \ /
- | ----------------- \ /
- | | chain::Access | v
- | ----------------- ---------
- | | | Event |
-(as RoutingMessageHandler) v ---------
- \ --------------------
- -----------------> | NetGraphMsgHandler |
- --------------------
+ | -------------------- | | | FeeEstimator | <-----------------------
+ | (as MessageSendEventsProvider) | | ---------------- \
+ | ^ | | / ------------------------ |
+ | \ | | / ---------> | BroadcasterInterface | |
+ | \ | | / / ------------------------ |
+ | \ v v v / ^ |
+ | (as ------------------ ---------------- | |
+ | ChannelMessageHandler)-> | ChannelManager | ----> | chain::Watch | | |
+ v / ------------------ ---------------- | |
+--------------- / (as EventsProvider) ^ | |
+| PeerManager |- \ | | |
+--------------- \ | (is-a) | |
+ | ----------------- \ _---------------- / /
+ | | chain::Access | \ / | ChainMonitor |---------------
+ | ----------------- \ / ----------------
+ | | \ /
+(as RoutingMessageHandler) v v
+ \ -------------------- ---------
+ -----------------> | NetGraphMsgHandler | | Event |
+ -------------------- ---------
```
}
}
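For orientation, here is a condensed wiring sketch of the object graph in the updated diagram, built only from constructors and type parameters that appear in the hunks below (TestBroadcaster, FuzzEstimator and KeyProvider are the fuzz-harness types from those hunks; the import paths are a best-effort assumption). It is an illustrative sketch, not part of the patch.

```rust
// Condensed wiring for the diagram above. Constructor signatures and generic parameters
// mirror the hunks below; treat this as illustrative rather than as patch contents.
use std::sync::Arc;
use bitcoin::network::constants::Network;
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::channelmonitor;
use lightning::util::config::UserConfig;
use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
use lightning::util::logger::Logger;

fn wire_node(broadcast: Arc<TestBroadcaster>, fee_est: Arc<FuzzEstimator>,
             keys_manager: Arc<KeyProvider>, logger: Arc<dyn Logger>) {
    // The ChainMonitor owns the per-channel ChannelMonitors; keyed by OutPoint it is-a
    // chain::Watch, which is the only chain interface the ChannelManager now needs.
    let chain_monitor: Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys,
            Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>> =
        Arc::new(channelmonitor::ChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));

    // The ChannelManager pushes monitor updates into whatever chain::Watch it is handed.
    // The fuzz targets below wrap the ChainMonitor in TestChainMonitor; passing it directly
    // also satisfies the same bounds.
    let manager = ChannelManager::new(Network::Testnet, fee_est.clone(), chain_monitor.clone(),
        broadcast.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), 0);

    // A PeerManager would then drive `manager` as its ChannelMessageHandler and poll both
    // objects for events, as the diagram shows.
    let _ = manager;
}
```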
-struct TestChannelMonitor {
+struct TestChainMonitor {
pub logger: Arc<dyn Logger>,
- pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
+ pub chain_monitor: Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
// logic will automatically force-close our channels for us (as we don't have an up-to-date
pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
pub should_update_manager: atomic::AtomicBool,
}
-impl TestChannelMonitor {
+impl TestChainMonitor {
pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>) -> Self {
Self {
- simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(broadcaster, logger.clone(), feeest)),
+ chain_monitor: Arc::new(channelmonitor::ChainMonitor::new(broadcaster, logger.clone(), feeest)),
logger,
update_ret: Mutex::new(Ok(())),
            latest_monitors: Mutex::new(HashMap::new()),
            should_update_manager: atomic::AtomicBool::new(false),
}
}
}
-impl chain::Watch for TestChannelMonitor {
+impl chain::Watch for TestChainMonitor {
type Keys = EnforcingChannelKeys;
fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
panic!("Already had monitor pre-watch_channel");
}
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
- assert!(self.simple_monitor.watch_channel(funding_txo, monitor).is_ok());
+ assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
self.update_ret.lock().unwrap().clone()
}
}
fn release_pending_htlc_updates(&self) -> Vec<HTLCUpdate> {
- return self.simple_monitor.release_pending_htlc_updates();
+ return self.chain_monitor.release_pending_htlc_updates();
}
}
macro_rules! make_node {
($node_id: expr) => { {
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
+ let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
let mut config = UserConfig::default();
macro_rules! reload_node {
($ser: expr, $node_id: expr, $old_monitors: expr) => { {
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let chain_monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
+ let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
let mut config = UserConfig::default();
channel_monitors: &mut monitor_refs,
};
- (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
+ (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
} }
}
type ChannelMan = ChannelManager<
EnforcingChannelKeys,
- Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
+ Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;
type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<NetGraphMsgHandler<Arc<dyn chain::Access>, Arc<dyn Logger>>>, Arc<dyn Logger>>;
struct MoneyLossDetector<'a> {
manager: Arc<ChannelMan>,
- monitor: Arc<channelmonitor::SimpleManyChannelMonitor<
+ monitor: Arc<channelmonitor::ChainMonitor<
OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
handler: PeerMan<'a>,
impl<'a> MoneyLossDetector<'a> {
pub fn new(peers: &'a RefCell<[bool; 256]>,
manager: Arc<ChannelMan>,
- monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
+ monitor: Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
handler: PeerMan<'a>) -> Self {
MoneyLossDetector {
manager,
};
let broadcast = Arc::new(TestBroadcaster{});
- let monitor = Arc::new(channelmonitor::SimpleManyChannelMonitor::new(broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
+ let monitor = Arc::new(channelmonitor::ChainMonitor::new(broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
let mut config = UserConfig::default();
//! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator;
//! type Logger = dyn lightning::util::logger::Logger;
//! type ChainAccess = dyn lightning::chain::Access;
-//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>>;
-//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChannelMonitor, TxBroadcaster, FeeEstimator, Logger>;
-//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChannelMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>;
+//! type ChainMonitor = lightning::ln::channelmonitor::ChainMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>>;
+//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>;
+//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>;
//!
//! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
+//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
//! let (sender, mut receiver) = mpsc::channel(2);
//! lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await;
//! loop {
//! for _event in channel_manager.get_and_clear_pending_events().drain(..) {
//! // Handle the event!
//! }
-//! for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//! for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
//! // Handle the event!
//! }
//! }
//! }
//!
//! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
+//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
//! let (sender, mut receiver) = mpsc::channel(2);
//! lightning_net_tokio::setup_inbound(peer_manager, sender, socket);
//! loop {
//! for _event in channel_manager.get_and_clear_pending_events().drain(..) {
//! // Handle the event!
//! }
-//! for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//! for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
//! // Handle the event!
//! }
//! }
let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {});
let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
{
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
// Now set it to failed again...
let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
{
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
// Now try to send a second payment which will fail to send
let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
{
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
}
// Now fix monitor updating...
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let responses = nodes[1].node.get_and_clear_pending_msg_events();
assert!(updates.update_fee.is_none());
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
_ => panic!("Unexpected event"),
}
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
check_added_monitors!(nodes[1], 1);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 0);
check_added_monitors!(nodes[1], 1);
let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[0], 1);
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Now fail monitor updating.
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
}
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
// Restore monitor updating, ensuring we immediately get a fail-back update and an
// update_add update.
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
expect_pending_htlcs_forwardable!(nodes[1]);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
// Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
// then restore channel monitor updates.
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
check_added_monitors!(nodes[1], 1);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
// nodes[1] should be AwaitingRAA here!
check_added_monitors!(nodes[1], 0);
// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
// update.
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Now un-fail the monitor, which will result in B sending its original commitment update,
// receiving the commitment update from A, and the resulting commitment dances.
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
check_added_monitors!(nodes[0], 1);
}
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let payment_event = SendEvent::from_event(events.pop().unwrap());
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
let payment_event = SendEvent::from_event(events.pop().unwrap());
assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
// Deliver the final RAA for the first payment, which does not require a response. RAAs
// generally require a commitment_signed, so the fact that we're expecting an opposite response
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
check_added_monitors!(nodes[1], 1);
// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
// paused, so forward shouldn't succeed until we call channel_monitor_updated().
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
} else { panic!("Unexpected event!"); }
// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
check_added_monitors!(nodes[0], 0);
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[0], 0);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
// (for the path 0 -> 2 -> 3) fails.
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- *nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+ *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
// Now check that we get the right return value, indicating that the first path succeeded but
// the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
} else { panic!(); }
check_added_monitors!(nodes[0], 2);
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
// Pass the first HTLC of the payment along to nodes[3].
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
// And check that, after we successfully update the monitor for chan_2 we can pass the second
// HTLC along to nodes[3] and claim the whole payment back to nodes[0].
- let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
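The comments above reference PaymentSendFailure::PartialFailure. Below is a hedged sketch of how calling code might inspect such a result; the per-path Vec<Result<(), APIError>> payload is an assumption consistent with the checks above, and paths that hit MonitorUpdateFailed are simply paused until channel_monitor_updated() is called, which is exactly what this test exercises.

```rust
// Sketch: count how many paths of a multi-path send are paused behind a failed monitor
// update. The PartialFailure payload shape (one Result per path, in path order) is an
// assumption based on the asserts above; such paths proceed once channel_monitor_updated()
// runs on the relevant channel.
use lightning::ln::channelmanager::PaymentSendFailure;
use lightning::util::errors::APIError;

fn paused_paths(res: &Result<(), PaymentSendFailure>) -> usize {
    match res {
        Ok(()) => 0,
        Err(PaymentSendFailure::PartialFailure(per_path)) => per_path.iter()
            .filter(|r| if let Err(APIError::MonitorUpdateFailed) = r { true } else { false })
            .count(),
        // Other failure modes (e.g. parameter errors) are out of scope for this sketch.
        Err(_) => 0,
    }
}
```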
/// `OutPoint` as the key, which will give you a [`chain::Watch`] implementation.
///
/// [`chain::Watch`]: ../../chain/trait.Watch.html
-pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref>
+pub struct ChainMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
}
impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
- ChainListener for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+ ChainListener for ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
}
}
-impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// Creates a new object which can be used to monitor several channels given the chain
/// interface with which to register to receive notifications.
- pub fn new(broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L> {
- let res = SimpleManyChannelMonitor {
+ pub fn new(broadcaster: T, logger: L, feeest: F) -> ChainMonitor<Key, ChanSigner, T, F, L> {
+ Self {
monitors: Mutex::new(HashMap::new()),
watch_events: Mutex::new(WatchEventQueue::new()),
broadcaster,
logger,
fee_estimator: feeest,
- };
-
- res
+ }
}
/// Adds or updates the monitor which monitors the channel referred to by the given key.
}
}
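As a quick usage note for the renamed constructor: the in-tree tests build the monitor from plain references (any Deref types satisfying the bounds above work), with OutPoint as the key so that the chain::Watch impl below applies. A small sketch, assuming the test_utils types and import paths used elsewhere in this diff:

```rust
// Minimal construction sketch; types and paths mirror the test code elsewhere in this diff.
// With OutPoint as the key, the resulting ChainMonitor is usable as a chain::Watch.
fn build_test_monitor<'a>(
    broadcaster: &'a test_utils::TestBroadcaster,
    fee_estimator: &'a test_utils::TestFeeEstimator,
    logger: &'a test_utils::TestLogger,
) -> channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys,
        &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger> {
    // Argument order matches new(broadcaster, logger, feeest) above.
    channelmonitor::ChainMonitor::new(broadcaster, logger, fee_estimator)
}
```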
-impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> chain::Watch for SimpleManyChannelMonitor<OutPoint, ChanSigner, T, F, L>
+impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> chain::Watch for ChainMonitor<OutPoint, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
}
}
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> events::EventsProvider for ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
}
}
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> chain::WatchEventProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> chain::WatchEventProvider for ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
/// Gets the list of pending events which were generated by previous actions, clearing the list
/// in the process.
///
- /// This is called by SimpleManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
+ /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
/// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
/// no internal locking in ChannelMonitors.
pub fn get_and_clear_pending_events(&mut self) -> Vec<events::Event> {
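For context, a sketch of how the ChainMonitor-level EventsProvider impl (whose header appears in a hunk above) can drain these per-monitor events: holding the monitors Mutex is what supplies the &mut access the method above needs, since ChannelMonitor itself does no locking. The bounds and field names follow this diff; the body is illustrative.

```rust
// Sketch of the ChainMonitor side: lock the monitors map, then drain each ChannelMonitor
// via the &mut method above. Illustrative only; not necessarily the exact patch contents.
impl<Key: Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref>
        events::EventsProvider for ChainMonitor<Key, ChanSigner, T, F, L>
    where T::Target: BroadcasterInterface,
          F::Target: FeeEstimator,
          L::Target: Logger,
{
    fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
        let mut pending_events = Vec::new();
        for monitor in self.monitors.lock().unwrap().values_mut() {
            pending_events.append(&mut monitor.get_and_clear_pending_events());
        }
        pending_events
    }
}
```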
use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
use util::enforcing_trait_impls::EnforcingChannelKeys;
use util::test_utils;
-use util::test_utils::TestChannelMonitor;
+use util::test_utils::TestChainMonitor;
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::errors::APIError;
use util::config::UserConfig;
use chain::WatchEventProvider;
use chain::chaininterface::ChainListener;
- let watch_events = node.chan_monitor.simple_monitor.release_pending_watch_events();
+ let watch_events = node.chain_monitor.chain_monitor.release_pending_watch_events();
process_chain_watch_events(&watch_events);
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
loop {
- node.chan_monitor.simple_monitor.block_connected(&block.header, &txdata, height);
+ node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height);
- let watch_events = node.chan_monitor.simple_monitor.release_pending_watch_events();
+ let watch_events = node.chain_monitor.chain_monitor.release_pending_watch_events();
process_chain_watch_events(&watch_events);
if watch_events.is_empty() {
pub chain_source: &'a test_utils::TestChainSource,
pub tx_broadcaster: &'a test_utils::TestBroadcaster,
pub fee_estimator: &'a test_utils::TestFeeEstimator,
- pub chan_monitor: test_utils::TestChannelMonitor<'a>,
+ pub chain_monitor: test_utils::TestChainMonitor<'a>,
pub keys_manager: test_utils::TestKeysInterface,
pub logger: &'a test_utils::TestLogger,
pub node_seed: [u8; 32],
pub block_notifier: chaininterface::BlockNotifierRef<'a>,
pub chain_source: &'c test_utils::TestChainSource,
pub tx_broadcaster: &'c test_utils::TestBroadcaster,
- pub chan_monitor: &'b test_utils::TestChannelMonitor<'c>,
+ pub chain_monitor: &'b test_utils::TestChainMonitor<'c>,
pub keys_manager: &'b test_utils::TestKeysInterface,
- pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
+ pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
pub net_graph_msg_handler: NetGraphMsgHandler<&'c test_utils::TestChainSource, &'c test_utils::TestLogger>,
pub node_seed: [u8; 32],
pub network_payment_count: Rc<RefCell<u8>>,
// Check that we processed all pending events
assert!(self.node.get_and_clear_pending_msg_events().is_empty());
assert!(self.node.get_and_clear_pending_events().is_empty());
- assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
+ assert!(self.chain_monitor.added_monitors.lock().unwrap().is_empty());
// Check that if we serialize the Router, we can deserialize it again.
{
let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 };
let mut deserialized_monitors = Vec::new();
{
- let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap();
+ let old_monitors = self.chain_monitor.chain_monitor.monitors.lock().unwrap();
for (_, old_monitor) in old_monitors.iter() {
let mut w = test_utils::TestVecWriter(Vec::new());
old_monitor.write_for_disk(&mut w).unwrap();
let mut w = test_utils::TestVecWriter(Vec::new());
self.node.write(&mut w).unwrap();
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
default_config: UserConfig::default(),
keys_manager: self.keys_manager,
fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 },
- chain_monitor: self.chan_monitor,
+ chain_monitor: self.chain_monitor,
tx_broadcaster: self.tx_broadcaster.clone(),
logger: &test_utils::TestLogger::new(),
channel_monitors: &mut channel_monitors,
}).unwrap();
}
- let channel_monitor = test_utils::TestChannelMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest);
+ let channel_monitor = test_utils::TestChainMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest);
for deserialized_monitor in deserialized_monitors.drain(..) {
if let Err(_) = channel_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
panic!();
macro_rules! get_local_commitment_txn {
($node: expr, $channel_id: expr) => {
{
- let mut monitors = $node.chan_monitor.simple_monitor.monitors.lock().unwrap();
+ let mut monitors = $node.chain_monitor.chain_monitor.monitors.lock().unwrap();
let mut commitment_txn = None;
for (funding_txo, monitor) in monitors.iter_mut() {
if funding_txo.to_channel_id() == $channel_id {
macro_rules! check_added_monitors {
($node: expr, $count: expr) => {
{
- let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = $node.chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), $count);
added_monitors.clear();
}
node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
{
- let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
{
- let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
for i in 0..node_count {
let seed = [i as u8; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
- let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator);
- nodes.push(NodeCfg { chain_source: &chanmon_cfgs[i].chain_source, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chan_monitor, keys_manager, node_seed: seed });
+ let chain_monitor = test_utils::TestChainMonitor::new(&chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator);
+ nodes.push(NodeCfg { chain_source: &chanmon_cfgs[i].chain_source, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chain_monitor, keys_manager, node_seed: seed });
}
nodes
}
-pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
+pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChainMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
let mut chanmgrs = Vec::new();
for i in 0..node_count {
let mut default_config = UserConfig::default();
default_config.channel_options.announced_channel = true;
default_config.peer_channel_config_limits.force_announced_channel_preference = false;
default_config.own_channel_config.our_htlc_minimum_msat = 1000; // sanitization is done by the sender, so to exercise receiver logic we need to lift the limit
- let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0);
+ let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0);
chanmgrs.push(node);
}
chanmgrs
}
-pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
+pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
let mut nodes = Vec::new();
let chan_count = Rc::new(RefCell::new(0));
let payment_count = Rc::new(RefCell::new(0));
for i in 0..node_count {
let block_notifier = chaininterface::BlockNotifier::new();
- block_notifier.register_listener(&cfgs[i].chan_monitor.simple_monitor as &chaininterface::ChainListener);
+ block_notifier.register_listener(&cfgs[i].chain_monitor.chain_monitor as &chaininterface::ChainListener);
block_notifier.register_listener(&chan_mgrs[i] as &chaininterface::ChainListener);
let net_graph_msg_handler = NetGraphMsgHandler::new(None, cfgs[i].logger);
nodes.push(Node{ chain_source: cfgs[i].chain_source, block_notifier,
- tx_broadcaster: cfgs[i].tx_broadcaster, chan_monitor: &cfgs[i].chan_monitor,
+ tx_broadcaster: cfgs[i].tx_broadcaster, chain_monitor: &cfgs[i].chain_monitor,
keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler,
node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
if steps & 0x0f == 4 { return; }
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
{
- let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
if steps & 0x0f == 5 { return; }
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
{
- let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
// nothing happens since node[1] is in AwaitingRemoteRevoke
nodes[1].node.send_payment(&route, our_payment_hash, &None).unwrap();
{
- let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 0);
added_monitors.clear();
}
// Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
connect_block(&nodes[1], &Block { header, txdata: node_txn}, 1);
{
- let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
added_monitors.clear();
}
let events = nodes[1].node.get_and_clear_pending_msg_events();
{
- let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 2);
assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
- let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
+ let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap();
monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
}
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let logger: test_utils::TestLogger;
let fee_estimator: test_utils::TestFeeEstimator;
- let new_chan_monitor: test_utils::TestChannelMonitor;
+ let new_chain_monitor: test_utils::TestChainMonitor;
let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
let nodes_0_serialized = nodes[0].node.encode();
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
logger = test_utils::TestLogger::new();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
- new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
- nodes[0].chan_monitor = &new_chan_monitor;
+ new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+ nodes[0].chain_monitor = &new_chain_monitor;
let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
assert!(chan_0_monitor_read.is_empty());
let (_, nodes_0_deserialized_tmp) = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: config,
keys_manager: &keys_manager,
fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chan_monitor,
+ chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
logger: &logger,
channel_monitors: &mut channel_monitors,
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
nodes[0].node = &nodes_0_deserialized;
nodes[0].block_notifier.register_listener(nodes[0].node);
assert_eq!(nodes[0].node.list_channels().len(), 1);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let fee_estimator: test_utils::TestFeeEstimator;
let logger: test_utils::TestLogger;
- let new_chan_monitor: test_utils::TestChannelMonitor;
+ let new_chain_monitor: test_utils::TestChainMonitor;
let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe
node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
{
- let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
{
- let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+ let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
assert_eq!(added_monitors[0].0, funding_output);
added_monitors.clear();
// Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized
let nodes_0_serialized = nodes[0].node.encode();
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
logger = test_utils::TestLogger::new();
- new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
- nodes[0].chan_monitor = &new_chan_monitor;
+ new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+ nodes[0].chain_monitor = &new_chain_monitor;
let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
assert!(chan_0_monitor_read.is_empty());
let (_, nodes_0_deserialized_tmp) = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: config,
keys_manager: &keys_manager,
fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chan_monitor,
+ chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
logger: &logger,
channel_monitors: &mut channel_monitors,
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
- assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
nodes[0].node = &nodes_0_deserialized;
// After deserializing, make sure the FundingBroadcastSafe event is still held by the channel manager
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let logger: test_utils::TestLogger;
let fee_estimator: test_utils::TestFeeEstimator;
- let new_chan_monitor: test_utils::TestChannelMonitor;
+ let new_chain_monitor: test_utils::TestChainMonitor;
let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
let nodes_0_serialized = nodes[0].node.encode();
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
logger = test_utils::TestLogger::new();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
- new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
- nodes[0].chan_monitor = &new_chan_monitor;
+ new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+ nodes[0].chain_monitor = &new_chain_monitor;
let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
assert!(chan_0_monitor_read.is_empty());
let (_, nodes_0_deserialized_tmp) = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
keys_manager: &keys_manager,
fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chan_monitor,
+ chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
logger: &logger,
channel_monitors: &mut channel_monitors,
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+ assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
nodes[0].node = &nodes_0_deserialized;
check_added_monitors!(nodes[0], 1);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let logger: test_utils::TestLogger;
let fee_estimator: test_utils::TestFeeEstimator;
- let new_chan_monitor: test_utils::TestChannelMonitor;
+ let new_chain_monitor: test_utils::TestChainMonitor;
let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
let mut node_0_stale_monitors_serialized = Vec::new();
- for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+ for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
let mut writer = test_utils::TestVecWriter(Vec::new());
monitor.1.write_for_disk(&mut writer).unwrap();
node_0_stale_monitors_serialized.push(writer.0);
// Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
// nodes[3])
let mut node_0_monitors_serialized = Vec::new();
- for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+ for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
let mut writer = test_utils::TestVecWriter(Vec::new());
monitor.1.write_for_disk(&mut writer).unwrap();
node_0_monitors_serialized.push(writer.0);
logger = test_utils::TestLogger::new();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
- new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
- nodes[0].chan_monitor = &new_chan_monitor;
+ new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+ nodes[0].chain_monitor = &new_chain_monitor;
let mut node_0_stale_monitors = Vec::new();
for serialized in node_0_stale_monitors_serialized.iter() {
let mut nodes_0_read = &nodes_0_serialized[..];
if let Err(msgs::DecodeError::InvalidValue) =
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
keys_manager: &keys_manager,
fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chan_monitor,
+ chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
logger: &logger,
channel_monitors: &mut node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
let mut nodes_0_read = &nodes_0_serialized[..];
let (_, nodes_0_deserialized_tmp) =
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
keys_manager: &keys_manager,
fee_estimator: &fee_estimator,
- chain_monitor: nodes[0].chan_monitor,
+ chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
logger: &logger,
channel_monitors: &mut node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
}
for monitor in node_0_monitors.drain(..) {
- assert!(nodes[0].chan_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
+ assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
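The stale-monitor read above checks a safety invariant: a ChannelMonitor handed to ChannelManagerReadArgs must never lag the manager's own record of that channel, so deserialization is expected to fail with DecodeError::InvalidValue rather than resume from inconsistent state. A toy version of that update-id comparison (hypothetical types, not the actual deserialization code):

```rust
#[derive(Debug, PartialEq)]
enum DecodeError { InvalidValue }

struct MonitorCopy { latest_update_id: u64 }

// The manager records the last update id it gave each monitor; reloading against an older
// monitor copy must be refused rather than silently resuming.
fn read_manager(manager_update_id: u64, monitor: &MonitorCopy) -> Result<(), DecodeError> {
    if monitor.latest_update_id < manager_update_id {
        return Err(DecodeError::InvalidValue);
    }
    Ok(())
}

fn main() {
    let stale = MonitorCopy { latest_update_id: 3 };
    let current = MonitorCopy { latest_update_id: 7 };
    assert_eq!(read_manager(7, &stale), Err(DecodeError::InvalidValue));
    assert!(read_manager(7, &current).is_ok());
}
```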
macro_rules! check_spendable_outputs {
($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => {
{
- let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
+ let events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
let mut txn = Vec::new();
for event in events {
match event {
// We manually create the node configuration to back up the seed.
let seed = [42; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
- let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
- let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chan_monitor, keys_manager, node_seed: seed };
+ let chain_monitor = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
+ let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager, node_seed: seed };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
node_cfgs.insert(0, node);
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
- nodes[0].chan_monitor.simple_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
+ nodes[0].chain_monitor.chain_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
// We broadcast a few more blocks to check everything is all right
// Cache node A state before any channel update
let previous_node_state = nodes[0].node.encode();
- let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
- nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
+ let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
+ nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chain_monitor_state).unwrap();
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
// Restore node A from previous state
logger = test_utils::TestLogger::with_id(format!("node {}", 0));
- let mut chan_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0)).unwrap().1;
+ let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0)).unwrap().1;
chain_source = test_utils::TestChainSource::new(Network::Testnet);
tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
- monitor = test_utils::TestChannelMonitor::new(&tx_broadcaster, &logger, &fee_estimator);
+ monitor = test_utils::TestChainMonitor::new(&tx_broadcaster, &logger, &fee_estimator);
node_state_0 = {
let mut channel_monitors = HashMap::new();
- channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chan_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+ channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
+ <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
keys_manager: &keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: &monitor,
}).unwrap().1
};
nodes[0].node = &node_state_0;
- assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok());
- nodes[0].chan_monitor = &monitor;
+ assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+ nodes[0].chain_monitor = &monitor;
nodes[0].chain_source = &chain_source;
nodes[0].block_notifier = BlockNotifier::new();
- nodes[0].block_notifier.register_listener(&nodes[0].chan_monitor.simple_monitor);
+ nodes[0].block_notifier.register_listener(&nodes[0].chain_monitor.chain_monitor);
nodes[0].block_notifier.register_listener(nodes[0].node);
check_added_monitors!(nodes[0], 1);
connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
connect_blocks(&nodes[0], 5, 130, false, header_130.bitcoin_hash());
{
- let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+ let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
// Route an HTLC from node 0 to node 1 (but don't settle)
let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
- // Copy SimpleManyChannelMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain
+ // Copy the ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC onchain
let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
let watchtower = {
- let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+ let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
let monitor = monitors.get(&outpoint).unwrap();
let mut w = test_utils::TestVecWriter(Vec::new());
monitor.write_for_disk(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
&mut ::std::io::Cursor::new(&w.0)).unwrap().1;
assert!(new_monitor == *monitor);
- let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+ let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
- watchtower.simple_monitor.block_connected(&header, &[], 200);
+ watchtower.chain_monitor.block_connected(&header, &[], 200);
// Try to update ChannelMonitor
assert!(nodes[1].node.claim_funds(preimage, &None, 9_000_000));
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
- if let Err(_) = watchtower.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
- if let Ok(_) = nodes[0].chan_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+ if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+ if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
} else { assert!(false); }
} else { assert!(false); };
// Our local monitor is in-sync and hasn't yet processed the timeout
}
}
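The watchtower copy rejects the update while the local, in-sync monitor accepts it: once a monitor copy has reacted onchain (here, after connecting blocks up to height 200 pushed it past the HTLC timeout), applying a newer commitment update would be unsafe, so update_channel returns an error. A toy model of that divergence (hypothetical types, not the real ChannelMonitor logic):

```rust
// Toy model of the two monitor copies; not the real ChannelMonitor logic.
struct MonitorView { acted_onchain: bool }

impl MonitorView {
    // Once a copy has reacted onchain (e.g. claimed a timed-out HTLC), accepting a newer
    // commitment update would be unsafe, so it is rejected.
    fn update_channel(&mut self) -> Result<(), ()> {
        if self.acted_onchain { Err(()) } else { Ok(()) }
    }
}

fn main() {
    // Watchtower copy: connected blocks up to height 200, past the HTLC timeout.
    let mut watchtower = MonitorView { acted_onchain: true };
    // Local copy: still in sync with the ChannelManager, no onchain action yet.
    let mut local = MonitorView { acted_onchain: false };

    assert!(watchtower.update_channel().is_err());
    assert!(local.update_channel().is_ok());
}
```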
-pub struct TestChannelMonitor<'a> {
+pub struct TestChainMonitor<'a> {
pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
- pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger>,
+ pub chain_monitor: channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger>,
pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
// If this is set to Some(), after the next return, we'll always return this until update_ret
// is changed:
pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
}
-impl<'a> TestChannelMonitor<'a> {
+impl<'a> TestChainMonitor<'a> {
pub fn new(broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
latest_monitor_update_id: Mutex::new(HashMap::new()),
- simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(broadcaster, logger, fee_estimator),
+ chain_monitor: channelmonitor::ChainMonitor::new(broadcaster, logger, fee_estimator),
update_ret: Mutex::new(Ok(())),
next_update_ret: Mutex::new(None),
}
}
}
-impl<'a> chain::Watch for TestChannelMonitor<'a> {
+impl<'a> chain::Watch for TestChainMonitor<'a> {
type Keys = EnforcingChannelKeys;
fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
assert!(new_monitor == monitor);
self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
self.added_monitors.lock().unwrap().push((funding_txo, monitor));
- assert!(self.simple_monitor.watch_channel(funding_txo, new_monitor).is_ok());
+ assert!(self.chain_monitor.watch_channel(funding_txo, new_monitor).is_ok());
let ret = self.update_ret.lock().unwrap().clone();
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
&mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
- assert!(self.simple_monitor.update_channel(funding_txo, update).is_ok());
+ assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and write it to disk...
- let monitors = self.simple_monitor.monitors.lock().unwrap();
+ let monitors = self.chain_monitor.monitors.lock().unwrap();
let monitor = monitors.get(&funding_txo).unwrap();
w.0.clear();
monitor.write_for_disk(&mut w).unwrap();
}
fn release_pending_htlc_updates(&self) -> Vec<HTLCUpdate> {
- return self.simple_monitor.release_pending_htlc_updates();
+ return self.chain_monitor.release_pending_htlc_updates();
}
}
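TestChainMonitor's update_ret / next_update_ret pair is the fault-injection hook the functional tests rely on: update_ret is returned on every call, and when next_update_ret is set it becomes the new default once the call in flight returns. A compact, self-contained sketch of the same pattern (simplified, not the test_utils API):

```rust
use std::sync::Mutex;

#[derive(Clone, Debug, PartialEq)]
enum UpdateErr { TemporaryFailure }

// Simplified stand-in for TestChainMonitor's fault-injection hook: every call returns the
// currently configured result, and next_update_ret becomes the new default afterwards.
struct FaultyMonitor {
    update_ret: Mutex<Result<(), UpdateErr>>,
    next_update_ret: Mutex<Option<Result<(), UpdateErr>>>,
}

impl FaultyMonitor {
    fn update_channel(&self) -> Result<(), UpdateErr> {
        let ret = self.update_ret.lock().unwrap().clone();
        if let Some(next) = self.next_update_ret.lock().unwrap().take() {
            *self.update_ret.lock().unwrap() = next;
        }
        ret
    }
}

fn main() {
    let mon = FaultyMonitor {
        update_ret: Mutex::new(Ok(())),
        next_update_ret: Mutex::new(Some(Err(UpdateErr::TemporaryFailure))),
    };
    // The call already in flight still succeeds...
    assert!(mon.update_channel().is_ok());
    // ...but every later call fails until update_ret is reset by the test.
    assert_eq!(mon.update_channel(), Err(UpdateErr::TemporaryFailure));
    assert_eq!(mon.update_channel(), Err(UpdateErr::TemporaryFailure));
    *mon.update_ret.lock().unwrap() = Ok(());
    assert!(mon.update_channel().is_ok());
}
```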