use bitcoin::hash_types::{BlockHash, WPubkeyHash};
use lightning::chain;
-use lightning::chain::{BestBlock, chainmonitor, channelmonitor, Confirm, Watch};
-use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
+use lightning::chain::{BestBlock, ChannelMonitorUpdateErr, chainmonitor, channelmonitor, Confirm, Watch};
+use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{KeysInterface, InMemorySigner};
self.0.extend_from_slice(buf);
Ok(())
}
- fn size_hint(&mut self, size: usize) {
- self.0.reserve_exact(size);
- }
}
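// For context, VecWriter is the fuzzer's trivial in-memory serialization sink. With the
// size_hint override dropped above (presumably because the Writer trait no longer requires
// it), the whole type reduces to roughly this sketch:
struct VecWriter(Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
}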
struct TestChainMonitor {
pub logger: Arc<dyn Logger>,
pub keys: Arc<KeyProvider>,
+ pub persister: Arc<TestPersister>,
pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
- pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
// logic will automatically force-close our channels for us (as we don't have an up-to-date
// monitor implying we are not able to punish misbehaving counterparties). Because this test
// "fails" if we ever force-close a channel, we avoid doing so, only ever serializing the
// latest ChannelMonitor state.
pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
pub should_update_manager: atomic::AtomicBool,
}
impl TestChainMonitor {
pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
Self {
- chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, persister)),
+ chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest, Arc::clone(&persister))),
logger,
keys,
- update_ret: Mutex::new(Ok(())),
+ persister,
latest_monitors: Mutex::new(HashMap::new()),
should_update_manager: atomic::AtomicBool::new(false),
}
}
}
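The key change in this impl: the injected failure now lives on the persister rather than on the Watch wrapper, so errors surface through the real ChainMonitor plumbing instead of being bolted on afterward. A minimal sketch of the TestPersister this relies on, assuming the chainmonitor::Persist trait shape of this LDK version (treat the exact signatures as illustrative):

pub struct TestPersister {
	pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
}
impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
	fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &ChannelMonitor<EnforcingSigner>, _update_id: chainmonitor::MonitorUpdateId)
	-> Result<(), chain::ChannelMonitorUpdateErr> {
		// Hand back whatever the fuzz input last stored, letting the driver
		// simulate a failing or asynchronous persistence backend.
		self.update_ret.lock().unwrap().clone()
	}
	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &ChannelMonitor<EnforcingSigner>, _update_id: chainmonitor::MonitorUpdateId)
	-> Result<(), chain::ChannelMonitorUpdateErr> {
		self.update_ret.lock().unwrap().clone()
	}
}

The 0x00 through 0x06 command bytes further down flip update_ret between Ok(()) and Err(TemporaryFailure), which is how the fuzzer exercises both the failing and recovering paths.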
impl chain::Watch<EnforcingSigner> for TestChainMonitor {
- fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
let mut ser = VecWriter(Vec::new());
monitor.write(&mut ser).unwrap();
if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
panic!("Already had monitor pre-watch_channel");
}
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
- assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
- self.update_ret.lock().unwrap().clone()
+ self.chain_monitor.watch_channel(funding_txo, monitor)
}
- fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
let mut map_lock = self.latest_monitors.lock().unwrap();
let mut map_entry = match map_lock.entry(funding_txo) {
hash_map::Entry::Occupied(entry) => entry,
deserialized_monitor.write(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
- assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
- self.update_ret.lock().unwrap().clone()
+ self.chain_monitor.update_channel(funding_txo, update)
}
fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
($node_id: expr, $fee_estimator: expr) => { {
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
- let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager)));
+ let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+ Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(&keys_manager)));
let mut config = UserConfig::default();
config.channel_options.forwarding_fee_proportional_millionths = 0;
($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { {
let keys_manager = Arc::clone(& $keys_manager);
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
- let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager)));
+ let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
+ Arc::new(TestPersister { update_ret: Mutex::new(Ok(())) }), Arc::clone(& $keys_manager)));
let mut config = UserConfig::default();
config.channel_options.forwarding_fee_proportional_millionths = 0;
},
events::MessageSendEvent::SendFundingLocked { .. } => continue,
events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => continue,
events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
// Can be generated as a reestablish response
},
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
- // Can be generated due to a payment forward being rejected due to a
- // channel having previously failed a monitor update
- },
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
// When we reconnect we will resend a channel_update to make sure our
// counterparty has the latest parameters for receiving payments
events::MessageSendEvent::SendChannelReestablish { .. } => {},
events::MessageSendEvent::SendFundingLocked { .. } => {},
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
},
events::MessageSendEvent::SendChannelReestablish { .. } => {},
events::MessageSendEvent::SendFundingLocked { .. } => {},
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
- events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
},
}
},
events::Event::PaymentSent { .. } => {},
- events::Event::PaymentFailed { .. } => {},
+ events::Event::PaymentPathFailed { .. } => {},
events::Event::PaymentForwarded { .. } if $node == 1 => {},
events::Event::PendingHTLCsForwardable { .. } => {
nodes[$node].process_pending_htlc_forwards();
// We keep related message groups close together in binary form, allowing
// bit-twiddling mutations to have similar effects. This is probably overkill, but no
// harm in doing so.
- 0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
- 0x04 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
- 0x05 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
- 0x06 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
+ 0x00 => *monitor_a.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x01 => *monitor_b.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x02 => *monitor_c.persister.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x04 => *monitor_a.persister.update_ret.lock().unwrap() = Ok(()),
+ 0x05 => *monitor_b.persister.update_ret.lock().unwrap() = Ok(()),
+ 0x06 => *monitor_c.persister.update_ret.lock().unwrap() = Ok(()),
0x08 => {
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[0].process_monitor_events();
}
},
0x09 => {
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[1].process_monitor_events();
}
},
0x0a => {
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[1].process_monitor_events();
}
},
0x0b => {
if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[2].process_monitor_events();
}
},
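The 0x08 through 0x0b arms above all perform the same two-step completion, which the quiescence pass below repeats per channel: mark the latest in-flight update as persisted on the ChainMonitor, then have the ChannelManager drain the released monitor events. A hypothetical helper showing the shape of the pattern (ChanMan stands in for the fuzzer's concrete ChannelManager type alias; the real code simply inlines these lines at each site):

fn complete_monitor_update(monitor: &TestChainMonitor, node: &ChanMan, funding: OutPoint) {
	if let Some((id, _)) = monitor.latest_monitors.lock().unwrap().get(&funding) {
		// Tell the ChainMonitor that persistence of update `id` completed...
		monitor.chain_monitor.force_channel_monitor_updated(funding, *id);
		// ...then let the ChannelManager act on the released MonitorEvents.
		node.process_monitor_events();
	}
}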
// Test that no channel is in a stuck state where neither party can send funds even
// after we resolve all pending events.
// First make sure there are no pending monitor updates, resetting the error state
- // and calling channel_monitor_updated for each monitor.
- *monitor_a.update_ret.lock().unwrap() = Ok(());
- *monitor_b.update_ret.lock().unwrap() = Ok(());
- *monitor_c.update_ret.lock().unwrap() = Ok(());
+ // and calling force_channel_monitor_updated for each monitor.
+ *monitor_a.persister.update_ret.lock().unwrap() = Ok(());
+ *monitor_b.persister.update_ret.lock().unwrap() = Ok(());
+ *monitor_c.persister.update_ret.lock().unwrap() = Ok(());
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[0].process_monitor_events();
}
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[1].process_monitor_events();
}
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[1].process_monitor_events();
}
if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[2].process_monitor_events();
}
// Next, make sure peers are all connected to each other