From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com> Date: Sat, 29 Feb 2020 02:59:34 +0000 (+0000) Subject: Merge pull request #507 from moneyball/patch-2 X-Git-Tag: v0.0.12~114 X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=ce4de5fb52246f91d37127594f8fd7d304ab86ad;hp=0c5e3510d76cde18ea4a37031ae2dd6cfe6f3d3c;p=rust-lightning Merge pull request #507 from moneyball/patch-2 Add project tracking and conventions we want to adopt --- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3e77f5d2..7a0c71f4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,8 +7,8 @@ testing and patches. Anyone is invited to contribute without regard to technical experience, "expertise", OSS experience, age, or other concern. However, the development of cryptocurrencies demands a -high-level of rigor, adversial thinking, thorough testing and risk-minimization. -Any bug may cost users real money. That said we deeply welcome people contributing +high-level of rigor, adversarial thinking, thorough testing and risk-minimization. +Any bug may cost users real money. That being said, we deeply welcome people contributing for the first time to an open source project or pick up Rust while contributing. Don't be shy, you'll learn. diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index cd22dc59..81380f14 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -73,56 +73,59 @@ impl Writer for VecWriter { } } -static mut IN_RESTORE: bool = false; pub struct TestChannelMonitor { - pub simple_monitor: Arc>, + pub logger: Arc, + pub simple_monitor: Arc, Arc>>, pub update_ret: Mutex>, - pub latest_good_update: Mutex>>, - pub latest_update_good: Mutex>, - pub latest_updates_good_at_last_ser: Mutex>, + // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization + // logic will automatically force-close our channels for us (as we don't have an up-to-date + // monitor implying we are not able to punish misbehaving counterparties). Because this test + // "fails" if we ever force-close a channel, we avoid doing so, always saving the latest + // fully-serialized monitor state here, as well as the corresponding update_id. 
+ pub latest_monitors: Mutex)>>, pub should_update_manager: atomic::AtomicBool, } impl TestChannelMonitor { - pub fn new(chain_monitor: Arc, broadcaster: Arc, logger: Arc, feeest: Arc) -> Self { + pub fn new(chain_monitor: Arc, broadcaster: Arc, logger: Arc, feeest: Arc) -> Self { Self { - simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest)), + simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger.clone(), feeest)), + logger, update_ret: Mutex::new(Ok(())), - latest_good_update: Mutex::new(HashMap::new()), - latest_update_good: Mutex::new(HashMap::new()), - latest_updates_good_at_last_ser: Mutex::new(HashMap::new()), + latest_monitors: Mutex::new(HashMap::new()), should_update_manager: atomic::AtomicBool::new(false), } } } impl channelmonitor::ManyChannelMonitor for TestChannelMonitor { - fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { - let ret = self.update_ret.lock().unwrap().clone(); - if let Ok(()) = ret { - let mut ser = VecWriter(Vec::new()); - monitor.write_for_disk(&mut ser).unwrap(); - self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0); - match self.latest_update_good.lock().unwrap().entry(funding_txo) { - hash_map::Entry::Vacant(e) => { e.insert(true); }, - hash_map::Entry::Occupied(mut e) => { - if !e.get() && unsafe { IN_RESTORE } { - // Technically we can't consider an update to be "good" unless we're doing - // it in response to a test_restore_channel_monitor as the channel may - // still be waiting on such a call, so only set us to good if we're in the - // middle of a restore call. - e.insert(true); - } - }, - } - self.should_update_manager.store(true, atomic::Ordering::Relaxed); - } else { - self.latest_update_good.lock().unwrap().insert(funding_txo, false); + fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + let mut ser = VecWriter(Vec::new()); + monitor.write_for_disk(&mut ser).unwrap(); + if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) { + panic!("Already had monitor pre-add_monitor"); } - assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok()); - ret + self.should_update_manager.store(true, atomic::Ordering::Relaxed); + assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok()); + self.update_ret.lock().unwrap().clone() } - fn fetch_pending_htlc_updated(&self) -> Vec { - return self.simple_monitor.fetch_pending_htlc_updated(); + fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { + let mut map_lock = self.latest_monitors.lock().unwrap(); + let mut map_entry = match map_lock.entry(funding_txo) { + hash_map::Entry::Occupied(entry) => entry, + hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"), + }; + let mut deserialized_monitor = <(Sha256d, channelmonitor::ChannelMonitor)>:: + read(&mut Cursor::new(&map_entry.get().1), Arc::clone(&self.logger)).unwrap().1; + deserialized_monitor.update_monitor(update.clone()).unwrap(); + let mut ser = VecWriter(Vec::new()); + deserialized_monitor.write_for_disk(&mut ser).unwrap(); + map_entry.insert((update.update_id, ser.0)); + self.should_update_manager.store(true, atomic::Ordering::Relaxed); 
+ self.update_ret.lock().unwrap().clone() + } + + fn get_and_clear_pending_htlcs_updated(&self) -> Vec { + return self.simple_monitor.get_and_clear_pending_htlcs_updated(); } } @@ -192,7 +195,7 @@ pub fn do_test(data: &[u8]) { config.channel_options.fee_proportional_millionths = 0; config.channel_options.announced_channel = true; config.peer_channel_config_limits.min_dust_limit_satoshis = 0; - (Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone() as Arc>, broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()), + (Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()), monitor) } } } @@ -210,10 +213,10 @@ pub fn do_test(data: &[u8]) { config.peer_channel_config_limits.min_dust_limit_satoshis = 0; let mut monitors = HashMap::new(); - let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap(); - for (outpoint, monitor_ser) in old_monitors.drain() { + let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap(); + for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() { monitors.insert(outpoint, <(Sha256d, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1); - monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser); + monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser)); } let mut monitor_refs = HashMap::new(); for (outpoint, monitor) in monitors.iter_mut() { @@ -223,24 +226,14 @@ pub fn do_test(data: &[u8]) { let read_args = ChannelManagerReadArgs { keys_manager, fee_estimator: fee_est.clone(), - monitor: monitor.clone() as Arc>, + monitor: monitor.clone(), tx_broadcaster: broadcast.clone(), logger, default_config: config, channel_monitors: &mut monitor_refs, }; - let res = (<(Sha256d, ChannelManager>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor); - for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() { - if !was_good { - // If the last time we updated a monitor we didn't successfully update (and we - // have sense updated our serialized copy of the ChannelManager) we may - // force-close the channel on our counterparty cause we know we're missing - // something. Thus, we just return here since we can't continue to test. - return; - } - } - res + (<(Sha256d, ChannelManager, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor) } } } @@ -266,6 +259,7 @@ pub fn do_test(data: &[u8]) { }; $source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::supported(), &accept_channel); + let funding_output; { let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -273,7 +267,7 @@ pub fn do_test(data: &[u8]) { let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut { value: *channel_value_satoshis, script_pubkey: output_script.clone(), }]}; - let funding_output = OutPoint::new(tx.txid(), 0); + funding_output = OutPoint::new(tx.txid(), 0); $source.funding_transaction_generated(&temporary_channel_id, funding_output); channel_txn.push(tx); } else { panic!("Wrong event type"); } @@ -303,6 +297,7 @@ pub fn do_test(data: &[u8]) { if let events::Event::FundingBroadcastSafe { .. 
} = events[0] { } else { panic!("Wrong event type"); } } + funding_output } } } @@ -359,8 +354,8 @@ pub fn do_test(data: &[u8]) { let mut nodes = [node_a, node_b, node_c]; - make_channel!(nodes[0], nodes[1], 0); - make_channel!(nodes[1], nodes[2], 1); + let chan_1_funding = make_channel!(nodes[0], nodes[1], 0); + let chan_2_funding = make_channel!(nodes[1], nodes[2], 1); for node in nodes.iter() { confirm_txn!(node); @@ -631,9 +626,26 @@ pub fn do_test(data: &[u8]) { 0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()), 0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()), 0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()), - 0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; }, - 0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; }, - 0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; }, + 0x06 => { + if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) { + nodes[0].channel_monitor_updated(&chan_1_funding, *id); + } + }, + 0x07 => { + if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) { + nodes[1].channel_monitor_updated(&chan_1_funding, *id); + } + }, + 0x24 => { + if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) { + nodes[1].channel_monitor_updated(&chan_2_funding, *id); + } + }, + 0x08 => { + if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) { + nodes[2].channel_monitor_updated(&chan_2_funding, *id); + } + }, 0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)), 0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)), 0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)), @@ -722,27 +734,19 @@ pub fn do_test(data: &[u8]) { nodes[2] = node_c.clone(); monitor_c = new_monitor_c; }, + // 0x24 defined above _ => test_return!(), } - if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) { - node_a_ser.0.clear(); - nodes[0].write(&mut node_a_ser).unwrap(); - monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed); - *monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone(); - } - if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) { - node_b_ser.0.clear(); - nodes[1].write(&mut node_b_ser).unwrap(); - monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed); - *monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone(); - } - if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) { - node_c_ser.0.clear(); - nodes[2].write(&mut node_c_ser).unwrap(); - monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed); - *monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone(); - } + node_a_ser.0.clear(); + nodes[0].write(&mut node_a_ser).unwrap(); + monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed); + node_b_ser.0.clear(); + nodes[1].write(&mut node_b_ser).unwrap(); + monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed); + node_c_ser.0.clear(); + nodes[2].write(&mut node_c_ser).unwrap(); + monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed); } } diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 568f8085..242c5957 100644 --- a/fuzz/src/full_stack.rs +++ 
b/fuzz/src/full_stack.rs @@ -136,9 +136,9 @@ impl<'a> Hash for Peer<'a> { } struct MoneyLossDetector<'a> { - manager: Arc>>>, - monitor: Arc>, - handler: PeerManager, Arc>>>>, + manager: Arc, Arc>>, Arc, Arc, Arc>>, + monitor: Arc, Arc>>, + handler: PeerManager, Arc, Arc>>, Arc, Arc, Arc>>>, peers: &'a RefCell<[bool; 256]>, funding_txn: Vec, @@ -150,9 +150,9 @@ struct MoneyLossDetector<'a> { } impl<'a> MoneyLossDetector<'a> { pub fn new(peers: &'a RefCell<[bool; 256]>, - manager: Arc>>>, - monitor: Arc>, - handler: PeerManager, Arc>>>>) -> Self { + manager: Arc, Arc>>, Arc, Arc, Arc>>, + monitor: Arc, Arc>>, + handler: PeerManager, Arc, Arc>>, Arc, Arc, Arc>>>) -> Self { MoneyLossDetector { manager, monitor, @@ -217,7 +217,7 @@ impl<'a> Drop for MoneyLossDetector<'a> { // Disconnect all peers for (idx, peer) in self.peers.borrow().iter().enumerate() { if *peer { - self.handler.disconnect_event(&Peer{id: idx as u8, peers_connected: &self.peers}); + self.handler.socket_disconnected(&Peer{id: idx as u8, peers_connected: &self.peers}); } } @@ -333,7 +333,7 @@ pub fn do_test(data: &[u8], logger: &Arc) { config.channel_options.fee_proportional_millionths = slice_to_be32(get_slice!(4)); config.channel_options.announced_channel = get_slice!(1)[0] != 0; config.peer_channel_config_limits.min_dust_limit_satoshis = 0; - let channelmanager = Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone() as Arc>, broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()); + let channelmanager = Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()); let router = Arc::new(Router::new(PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret()), watch.clone(), Arc::clone(&logger))); let peers = RefCell::new([false; 256]); @@ -378,7 +378,7 @@ pub fn do_test(data: &[u8], logger: &Arc) { 2 => { let peer_id = get_slice!(1)[0]; if !peers.borrow()[peer_id as usize] { return; } - loss_detector.handler.disconnect_event(&Peer{id: peer_id, peers_connected: &peers}); + loss_detector.handler.socket_disconnected(&Peer{id: peer_id, peers_connected: &peers}); peers.borrow_mut()[peer_id as usize] = false; }, 3 => { diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 434e1b65..b0d7b603 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -118,7 +118,7 @@ pub fn do_test(data: &[u8]) { macro_rules! 
decode_msg { ($MsgType: path, $len: expr) => {{ let mut reader = ::std::io::Cursor::new(get_slice!($len)); - match <($MsgType)>::read(&mut reader) { + match <$MsgType>::read(&mut reader) { Ok(msg) => msg, Err(e) => match e { msgs::DecodeError::UnknownVersion => return, diff --git a/fuzz/travis-fuzz.sh b/fuzz/travis-fuzz.sh index 5ec431d7..57e32647 100755 --- a/fuzz/travis-fuzz.sh +++ b/fuzz/travis-fuzz.sh @@ -13,6 +13,7 @@ rm *_target.rs popd cargo install --force honggfuzz +sed -i 's/lto = true//' Cargo.toml HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" cargo hfuzz build for TARGET in src/bin/*.rs; do FILENAME=$(basename $TARGET) diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 47e17918..c2bac324 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -59,7 +59,7 @@ impl Connection { return future::Either::A(blocker.then(|_| { Ok(()) })); } } - //TODO: There's a race where we don't meet the requirements of disconnect_socket if its + //TODO: There's a race where we don't meet the requirements of socket_disconnected if its //called right here, after we release the us_ref lock in the scope above, but before we //call read_event! match peer_manager.read_event(&mut SocketDescriptor::new(us_ref.clone(), peer_manager.clone()), pending_read) { @@ -84,7 +84,7 @@ impl Connection { future::Either::B(future::result(Ok(()))) }).then(move |_| { if us_close_ref.lock().unwrap().need_disconnect { - peer_manager_ref.disconnect_event(&SocketDescriptor::new(us_close_ref, peer_manager_ref.clone())); + peer_manager_ref.socket_disconnected(&SocketDescriptor::new(us_close_ref, peer_manager_ref.clone())); println!("Peer disconnected!"); } else { println!("We disconnected peer!"); diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 7a077e89..73aecfe9 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -19,6 +19,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::HashSet; use std::ops::Deref; use std::marker::PhantomData; +use std::ptr; /// Used to give chain error details upstream pub enum ChainError { @@ -253,11 +254,22 @@ impl<'a, CL: Deref + 'a> BlockNotifier<'a, CL> { } /// Register the given listener to receive events. - // TODO: unregister pub fn register_listener(&self, listener: CL) { let mut vec = self.listeners.lock().unwrap(); vec.push(listener); } + /// Unregister the given listener to no longer + /// receive events. + /// + /// If the same listener is registered multiple times, unregistering + /// will remove ALL occurrences of that listener. Comparison is done using + /// the pointer returned by the Deref trait implementation. + pub fn unregister_listener(&self, listener: CL) { + let mut vec = self.listeners.lock().unwrap(); + // item is a ref to an abstract thing that dereferences to a ChainListener, + // so dereference it twice to get the ChainListener itself + vec.retain(|item | !ptr::eq(&(**item), &(*listener))); + } /// Notify listeners that a block was connected given a full, unfiltered block. 
/// @@ -388,3 +400,80 @@ impl ChainWatchInterfaceUtil { watched.does_match_tx(tx) } } + +#[cfg(test)] +mod tests { + use ln::functional_test_utils::{create_chanmon_cfgs, create_node_cfgs}; + use super::{BlockNotifier, ChainListener}; + use std::ptr; + + #[test] + fn register_listener_test() { + let chanmon_cfgs = create_chanmon_cfgs(1); + let node_cfgs = create_node_cfgs(1, &chanmon_cfgs); + let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone()); + assert_eq!(block_notifier.listeners.lock().unwrap().len(), 0); + let listener = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener; + block_notifier.register_listener(listener); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 1); + let item = vec.first().clone().unwrap(); + assert!(ptr::eq(&(**item), &(*listener))); + } + + #[test] + fn unregister_single_listener_test() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone()); + let listener1 = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener; + let listener2 = &node_cfgs[1].chan_monitor.simple_monitor as &ChainListener; + block_notifier.register_listener(listener1); + block_notifier.register_listener(listener2); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 2); + drop(vec); + block_notifier.unregister_listener(listener1); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 1); + let item = vec.first().clone().unwrap(); + assert!(ptr::eq(&(**item), &(*listener2))); + } + + #[test] + fn unregister_single_listener_ref_test() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone()); + block_notifier.register_listener(&node_cfgs[0].chan_monitor.simple_monitor as &ChainListener); + block_notifier.register_listener(&node_cfgs[1].chan_monitor.simple_monitor as &ChainListener); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 2); + drop(vec); + block_notifier.unregister_listener(&node_cfgs[0].chan_monitor.simple_monitor); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 1); + let item = vec.first().clone().unwrap(); + assert!(ptr::eq(&(**item), &(*&node_cfgs[1].chan_monitor.simple_monitor))); + } + + #[test] + fn unregister_multiple_of_the_same_listeners_test() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone()); + let listener1 = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener; + let listener2 = &node_cfgs[1].chan_monitor.simple_monitor as &ChainListener; + block_notifier.register_listener(listener1); + block_notifier.register_listener(listener1); + block_notifier.register_listener(listener2); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 3); + drop(vec); + block_notifier.unregister_listener(listener1); + let vec = block_notifier.listeners.lock().unwrap(); + assert_eq!(vec.len(), 1); + let item = vec.first().clone().unwrap(); + assert!(ptr::eq(&(**item), &(*listener2))); + } +} diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs index 158f71db..544df015 100644 --- a/lightning/src/chain/keysinterface.rs +++ b/lightning/src/chain/keysinterface.rs @@ -88,6 +88,57 @@ pub enum 
SpendableOutputDescriptor { } } +impl Writeable for SpendableOutputDescriptor { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + match self { + &SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { + 0u8.write(writer)?; + outpoint.write(writer)?; + output.write(writer)?; + }, + &SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => { + 1u8.write(writer)?; + outpoint.write(writer)?; + key.write(writer)?; + witness_script.write(writer)?; + to_self_delay.write(writer)?; + output.write(writer)?; + }, + &SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => { + 2u8.write(writer)?; + outpoint.write(writer)?; + key.write(writer)?; + output.write(writer)?; + }, + } + Ok(()) + } +} + +impl Readable for SpendableOutputDescriptor { + fn read(reader: &mut R) -> Result { + match Readable::read(reader)? { + 0u8 => Ok(SpendableOutputDescriptor::StaticOutput { + outpoint: Readable::read(reader)?, + output: Readable::read(reader)?, + }), + 1u8 => Ok(SpendableOutputDescriptor::DynamicOutputP2WSH { + outpoint: Readable::read(reader)?, + key: Readable::read(reader)?, + witness_script: Readable::read(reader)?, + to_self_delay: Readable::read(reader)?, + output: Readable::read(reader)?, + }), + 2u8 => Ok(SpendableOutputDescriptor::DynamicOutputP2WPKH { + outpoint: Readable::read(reader)?, + key: Readable::read(reader)?, + output: Readable::read(reader)?, + }), + _ => Err(DecodeError::InvalidValue), + } + } +} + /// A trait to describe an object which can get user secrets and key material. pub trait KeysInterface: Send + Sync { /// A type which implements ChannelKeys which will be returned by get_channel_keys. @@ -135,7 +186,8 @@ pub trait KeysInterface: Send + Sync { /// (TODO: We shouldn't require that, and should have an API to get them at deser time, due mostly /// to the possibility of reentrancy issues by calling the user's code during our deserialization /// routine). -/// TODO: remove Clone once we start returning ChannelUpdate objects instead of copying ChannelMonitor +/// TODO: We should remove Clone by instead requesting a new ChannelKeys copy when we create +/// ChannelMonitors instead of expecting to clone the one out of the Channel into the monitors. 
pub trait ChannelKeys : Send+Clone { /// Gets the private key for the anchor tx fn funding_key<'a>(&'a self) -> &'a SecretKey; diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs index ce43984e..0f479ff9 100644 --- a/lightning/src/chain/transaction.rs +++ b/lightning/src/chain/transaction.rs @@ -39,6 +39,8 @@ impl OutPoint { } } +impl_writeable!(OutPoint, 0, { txid, index }); + #[cfg(test)] mod tests { use chain::transaction::OutPoint; diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index e7bea909..3fd489fa 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -17,6 +17,7 @@ use bitcoin_hashes::sha256d::Hash as Sha256dHash; use ln::channelmanager::{PaymentHash, PaymentPreimage}; use ln::msgs::DecodeError; use util::ser::{Readable, Writeable, Writer, WriterWriteAdaptor}; +use util::byte_utils; use secp256k1::key::{SecretKey, PublicKey}; use secp256k1::{Secp256k1, Signature}; @@ -59,6 +60,114 @@ pub(super) fn build_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [ res } +/// Implements the per-commitment secret storage scheme from +/// [BOLT 3](https://github.com/lightningnetwork/lightning-rfc/blob/dcbf8583976df087c79c3ce0b535311212e6812d/03-transactions.md#efficient-per-commitment-secret-storage). +/// +/// Allows us to keep track of all of the revocation secrets of counterarties in just 50*32 bytes +/// or so. +#[derive(Clone)] +pub(super) struct CounterpartyCommitmentSecrets { + old_secrets: [([u8; 32], u64); 49], +} + +impl PartialEq for CounterpartyCommitmentSecrets { + fn eq(&self, other: &Self) -> bool { + for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) { + if secret != o_secret || idx != o_idx { + return false + } + } + true + } +} + +impl CounterpartyCommitmentSecrets { + pub(super) fn new() -> Self { + Self { old_secrets: [([0; 32], 1 << 48); 49], } + } + + #[inline] + fn place_secret(idx: u64) -> u8 { + for i in 0..48 { + if idx & (1 << i) == (1 << i) { + return i + } + } + 48 + } + + pub(super) fn get_min_seen_secret(&self) -> u64 { + //TODO This can be optimized? 
+ let mut min = 1 << 48; + for &(_, idx) in self.old_secrets.iter() { + if idx < min { + min = idx; + } + } + min + } + + #[inline] + pub(super) fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] { + let mut res: [u8; 32] = secret; + for i in 0..bits { + let bitpos = bits - 1 - i; + if idx & (1 << bitpos) == (1 << bitpos) { + res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7); + res = Sha256::hash(&res).into_inner(); + } + } + res + } + + pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), ()> { + let pos = Self::place_secret(idx); + for i in 0..pos { + let (old_secret, old_idx) = self.old_secrets[i as usize]; + if Self::derive_secret(secret, pos, old_idx) != old_secret { + return Err(()); + } + } + if self.get_min_seen_secret() <= idx { + return Ok(()); + } + self.old_secrets[pos as usize] = (secret, idx); + Ok(()) + } + + /// Can only fail if idx is < get_min_seen_secret + pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> { + for i in 0..self.old_secrets.len() { + if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 { + return Some(Self::derive_secret(self.old_secrets[i].0, i as u8, idx)) + } + } + assert!(idx < self.get_min_seen_secret()); + None + } +} + +impl Writeable for CounterpartyCommitmentSecrets { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + for &(ref secret, ref idx) in self.old_secrets.iter() { + writer.write_all(secret)?; + writer.write_all(&byte_utils::be64_to_array(*idx))?; + } + Ok(()) + } +} +impl Readable for CounterpartyCommitmentSecrets { + fn read(reader: &mut R) -> Result { + let mut old_secrets = [([0; 32], 1 << 48); 49]; + for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() { + *secret = Readable::read(reader)?; + *idx = Readable::read(reader)?; + } + + Ok(Self { old_secrets }) + } +} + /// Derives a per-commitment-transaction private key (eg an htlc key or payment key) from the base /// private key for that type of key and the per_commitment_point (available in TxCreationKeys) pub fn derive_private_key(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, base_secret: &SecretKey) -> Result { @@ -137,7 +246,7 @@ pub(super) fn derive_public_revocation_key(secp_ctx: /// The set of public keys which are used in the creation of one commitment transaction. /// These are derived from the channel base keys and per-commitment data. -#[derive(PartialEq)] +#[derive(PartialEq, Clone)] pub struct TxCreationKeys { /// The per-commitment public key which was used to derive the other keys. pub per_commitment_point: PublicKey, @@ -153,6 +262,8 @@ pub struct TxCreationKeys { /// B's Payment Key pub(crate) b_payment_key: PublicKey, } +impl_writeable!(TxCreationKeys, 33*6, + { per_commitment_point, revocation_key, a_htlc_key, b_htlc_key, a_delayed_payment_key, b_payment_key }); /// One counterparty's public keys which do not change over the life of a channel. 
#[derive(Clone, PartialEq)] @@ -235,6 +346,14 @@ pub struct HTLCOutputInCommitment { pub transaction_output_index: Option, } +impl_writeable!(HTLCOutputInCommitment, 1 + 8 + 4 + 32 + 5, { + offered, + amount_msat, + cltv_expiry, + payment_hash, + transaction_output_index +}); + #[inline] pub(super) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a_htlc_key: &PublicKey, b_htlc_key: &PublicKey, revocation_key: &PublicKey) -> Script { let payment_hash160 = Ripemd160::hash(&htlc.payment_hash.0[..]).into_inner(); @@ -505,3 +624,354 @@ impl Readable for LocalCommitmentTransaction { Ok(Self { tx }) } } + +#[cfg(test)] +mod tests { + use super::CounterpartyCommitmentSecrets; + use hex; + + #[test] + fn test_per_commitment_storage() { + // Test vectors from BOLT 3: + let mut secrets: Vec<[u8; 32]> = Vec::new(); + let mut monitor; + + macro_rules! test_secrets { + () => { + let mut idx = 281474976710655; + for secret in secrets.iter() { + assert_eq!(monitor.get_secret(idx).unwrap(), *secret); + idx -= 1; + } + assert_eq!(monitor.get_min_seen_secret(), idx + 1); + assert!(monitor.get_secret(idx).is_none()); + }; + } + + { + // insert_secret correct sequence + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap()); + monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap()); + monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap()); + monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap()); + monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + } + + { + // insert_secret #1 incorrect + monitor = 
CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + assert!(monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #2 incorrect (#1 derived from incorrect) + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + assert!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #3 incorrect + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + assert!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #4 incorrect (1,2,3 derived from incorrect) + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + 
secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4").unwrap()); + monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap()); + monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap()); + monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap()); + monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap()); + assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #5 incorrect + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap()); + monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + 
secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap()); + assert!(monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #6 incorrect (5 derived from incorrect) + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap()); + monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1").unwrap()); + monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap()); + monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap()); + assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #7 incorrect + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + 
secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap()); + monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap()); + monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3").unwrap()); + monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap()); + assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err()); + } + + { + // insert_secret #8 incorrect + monitor = CounterpartyCommitmentSecrets::new(); + secrets.clear(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()); + monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap()); + monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); + monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); + monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap()); + monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap()); + monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap()); + monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap(); + test_secrets!(); + + secrets.push([0; 32]); + 
secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4").unwrap()); + assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err()); + } + } +} diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5772d015..562b2152 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3,6 +3,7 @@ //! There are a bunch of these as their handling is relatively error-prone so they are split out //! here. See also the chanmon_fail_consistency fuzz test. +use chain::transaction::OutPoint; use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash}; use ln::channelmonitor::ChannelMonitorUpdateErr; use ln::features::InitFeatures; @@ -19,7 +20,8 @@ use ln::functional_test_utils::*; #[test] fn test_simple_monitor_permanent_update_fail() { // Test that we handle a simple permanent monitor update failure - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); @@ -51,10 +53,11 @@ fn test_simple_monitor_permanent_update_fail() { fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // Test that we can recover from a simple temporary monitor update failure optionally with // a disconnect in between - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]); @@ -74,8 +77,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -114,10 +118,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } - // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + // ...and make sure we can force-close a frozen channel + nodes[0].node.force_close_channel(&channel_id); + 
check_added_monitors!(nodes[0], 0); check_closed_broadcast!(nodes[0], false); // TODO: Once we hit the chain with the failure transaction we should check that we get a @@ -152,10 +155,11 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // * We then walk through more message exchanges to get the original update_add_htlc // through, swapping message ordering based on disconnect_count & 8 and optionally // disconnect/reconnecting based on disconnect_count. - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -198,6 +202,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed); + check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); } @@ -214,8 +219,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[0], 0); macro_rules! 
disconnect_reconnect_peers { () => { { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); @@ -480,10 +486,11 @@ fn test_monitor_temporary_update_fail_c() { #[test] fn test_monitor_update_fail_cs() { // Tests handling of a monitor update failure when processing an incoming commitment_signed - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); @@ -501,8 +508,9 @@ fn test_monitor_update_fail_cs() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -534,8 +542,9 @@ fn test_monitor_update_fail_cs() { } *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa); @@ -559,12 +568,13 @@ fn test_monitor_update_fail_cs() { #[test] fn test_monitor_update_fail_no_rebroadcast() { // Tests handling of a monitor update failure when no message rebroadcasting on - // test_restore_channel_monitor() is required. Backported from - // chanmon_fail_consistency fuzz tests. - let node_cfgs = create_node_cfgs(2); + // channel_monitor_updated() is required. Backported from chanmon_fail_consistency + // fuzz tests. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); @@ -584,9 +594,10 @@ fn test_monitor_update_fail_no_rebroadcast() { check_added_monitors!(nodes[1], 1); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); let events = nodes[1].node.get_and_clear_pending_events(); @@ -605,10 +616,11 @@ fn test_monitor_update_fail_no_rebroadcast() { fn test_monitor_update_raa_while_paused() { // Tests handling of an RAA while monitor updating has already been marked failed. // Backported from chanmon_fail_consistency fuzz tests as this used to be broken. - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000); @@ -642,8 +654,9 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[0], 1); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[0], 0); let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0); @@ -674,7 +687,8 @@ fn test_monitor_update_raa_while_paused() { fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Tests handling of a monitor update failure when processing an incoming RAA - let node_cfgs = create_node_cfgs(3); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); @@ -784,6 +798,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { send_event = 
SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -794,8 +809,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -929,10 +945,11 @@ fn test_monitor_update_fail_reestablish() { // Simple test for message retransmission after monitor update failure on // channel_reestablish generating a monitor update (which comes from freeing holding cell // HTLCs). - let node_cfgs = create_node_cfgs(3); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported()); let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); @@ -983,8 +1000,9 @@ fn test_monitor_update_fail_reestablish() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -1009,10 +1027,11 @@ fn raa_no_response_awaiting_raa_state() { // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel // in question (assuming it intends to respond with a CS after monitor updating is restored). // Backported from chanmon_fail_consistency fuzz tests as this used to be broken. 
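Most tests in this file also pick up the same two setup changes seen above: a chanmon_cfgs step is created first and threaded into create_node_cfgs(), and the channel id (the third element of the tuple returned by create_announced_chan_between_nodes()) is captured so the later restore call can find the right monitor entry. Roughly, for a two-node test using the harness helpers referenced above:

    // Sketch of the updated two-node setup boilerplate.
    let chanmon_cfgs = create_chanmon_cfgs(2);
    let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
    let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
    // .2 is the channel id of the newly announced channel; it keys the
    // latest_monitor_update_id map when the monitor is later restored.
    let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1,
        InitFeatures::supported(), InitFeatures::supported()).2;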
- let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); @@ -1063,9 +1082,10 @@ fn raa_no_response_awaiting_raa_state() { check_added_monitors!(nodes[1], 1); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); // nodes[1] should be AwaitingRAA here! - check_added_monitors!(nodes[1], 1); + check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_received!(nodes[1], payment_hash_1, 1000000); @@ -1124,10 +1144,11 @@ fn claim_while_disconnected_monitor_update_fail() { // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling // code introduced a regression in this test (specifically, this caught a removal of the // channel_reestablish handling ensuring the order was sensical given the messages used). - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; // Forward a payment for B to claim let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -1167,16 +1188,18 @@ fn claim_while_disconnected_monitor_update_fail() { let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed); + check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC - // until we've test_restore_channel_monitor'd and updated for the new commitment transaction. + // until we've channel_monitor_update'd and updated for the new commitment transaction. // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
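The "commitment dances" mentioned above follow the usual shape once the monitor is un-failed: one side's commitment_signed is answered by a revoke_and_ack (plus another commitment_signed when updates are pending), and each handled message adds exactly one monitor update. A rough sketch of a single round, assuming bs_commitment_signed already holds a commitment_signed message from nodes[1]:

    // Sketch: one commitment_signed / revoke_and_ack round between nodes[0] and nodes[1],
    // using the harness macros used elsewhere in this file.
    nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
    check_added_monitors!(nodes[0], 1);
    let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
    nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
    check_added_monitors!(nodes[1], 1);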
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msgs.len(), 2); @@ -1241,10 +1264,11 @@ fn monitor_failed_no_reestablish_response() { // response to a commitment_signed. // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing // debug_assert!() failure in channel_reestablish handling. - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; // Route the payment and deliver the initial commitment_signed (with a monitor update failure // on receipt). @@ -1278,8 +1302,9 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0); @@ -1309,10 +1334,11 @@ fn first_message_on_recv_ordering() { // have no pending response but will want to send a RAA/CS (with the updates for the second // payment applied). // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here. - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; // Route the first payment outbound, holding the last RAA for B until we are set up so that we // can deliver it and fail the monitor update. @@ -1358,16 +1384,18 @@ fn first_message_on_recv_ordering() { check_added_monitors!(nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an - // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with - // the appropriate HTLC acceptance). + // RAA/CS response, which should be generated when we call channel_monitor_update (with the + // appropriate HTLC acceptance). 
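Several of these tests deliver an update_add_htlc/commitment_signed pair while a monitor update failure is already pending, then verify that the channel only defers its RAA rather than responding. Roughly, assuming payment_event holds the SendEvent for the HTLC being delivered:

    // Sketch: with a monitor update failure already pending on nodes[1], deliver a
    // further HTLC and commitment_signed and check that only the deferral is logged.
    nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
    nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
    // The commitment_signed still results in a monitor update attempt (which fails again)...
    check_added_monitors!(nodes[1], 1);
    // ...but no response messages go out until the update is reported complete.
    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
    nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(),
        "Previous monitor update failure prevented generation of RAA".to_string(), 1);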
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_received!(nodes[1], payment_hash_1, 1000000); @@ -1396,7 +1424,8 @@ fn test_monitor_update_fail_claim() { // update to claim the payment. We then send a payment C->B->A, making the forward of this // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor // updating and claim the payment on B. - let node_cfgs = create_node_cfgs(3); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); @@ -1417,7 +1446,7 @@ fn test_monitor_update_fail_claim() { check_added_monitors!(nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be - // paused, so forward shouldn't succeed until we call test_restore_channel_monitor(). + // paused, so forward shouldn't succeed until we call channel_monitor_updated(). *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -1451,8 +1480,9 @@ fn test_monitor_update_fail_claim() { } else { panic!("Unexpected event!"); } // Now restore monitor updating on the 0<->1 channel and claim the funds on B. - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]); @@ -1471,10 +1501,11 @@ fn test_monitor_update_on_pending_forwards() { // We do this with a simple 3-node network, sending a payment from A to C and one from C to A. // The payment from A to C will be failed by C and pending a back-fail to A, while the payment // from C to A will be pending a forward to A. 
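Once updating is restored on the paused channel, the held-back HTLC resolution is released as an ordinary update for the counterparty to process, e.g. (sketch, reusing the bs_fulfill_update naming from the surrounding tests):

    // Sketch: after channel_monitor_updated() on the 0<->1 channel, nodes[1] releases
    // the held-back fulfill for nodes[0] to handle.
    let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
    nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);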
- let node_cfgs = create_node_cfgs(3); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported()); // Rebalance a bit so that we can send backwards from 3 to 1. @@ -1508,8 +1539,9 @@ fn test_monitor_update_on_pending_forwards() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); @@ -1538,10 +1570,11 @@ fn monitor_update_claim_fail_no_response() { // to channel being AwaitingRAA). // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling // code was broken. - let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2; // Forward a payment for B to claim let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -1566,8 +1599,9 @@ fn monitor_update_claim_fail_no_response() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); @@ -1599,7 +1633,8 @@ fn monitor_update_claim_fail_no_response() { fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) { // Test that if the monitor update generated by funding_transaction_generated fails we continue // the channel setup happily after the update is restored. 
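In the funding-time test that follows, no announced channel exists yet, so the channel id is instead derived from the funding outpoint carried in the funding_created message, as sketched here:

    // Sketch: derive the channel id from the funding_created message, matching the
    // lookup key used by latest_monitor_update_id in the funding-time test below.
    let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
    let channel_id = OutPoint {
        txid: funding_created_msg.funding_txid,
        index: funding_created_msg.funding_output_index,
    }.to_channel_id();
    nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);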
- let node_cfgs = create_node_cfgs(2); + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1616,14 +1651,17 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: check_added_monitors!(nodes[0], 1); *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())); + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); if restore_between_fails { assert!(fail_on_generate); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1639,18 +1677,20 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); if fail_on_generate && !restore_between_fails { nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented funding_signed from allowing funding broadcast".to_string(), 1); - check_added_monitors!(nodes[0], 0); + check_added_monitors!(nodes[0], 1); } else { nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[0], 1); } assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); + let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[0].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[0], 0); + } else { + check_added_monitors!(nodes[0], 1); } - check_added_monitors!(nodes[0], 1); - let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -1684,8 +1724,9 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: } *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[1].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[1], 1); + let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + nodes[1].node.channel_monitor_updated(&outpoint, latest_update); + check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), 
&get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 31741970..2e4f49ce 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -18,9 +18,9 @@ use secp256k1; use ln::features::{ChannelFeatures, InitFeatures}; use ln::msgs; use ln::msgs::{DecodeError, OptionalField, DataLossProtect}; -use ln::channelmonitor::ChannelMonitor; -use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; -use ln::chan_utils::{LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep}; +use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; +use ln::chan_utils::{CounterpartyCommitmentSecrets, LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys}; use ln::chan_utils; use chain::chaininterface::{FeeEstimator,ConfirmationTarget}; use chain::transaction::OutPoint; @@ -35,6 +35,7 @@ use std; use std::default::Default; use std::{cmp,mem,fmt}; use std::sync::{Arc}; +use std::ops::Deref; #[cfg(test)] pub struct ChannelValueStat { @@ -240,11 +241,14 @@ pub(super) struct Channel { secp_ctx: Secp256k1, channel_value_satoshis: u64, + latest_monitor_update_id: u64, + #[cfg(not(test))] local_keys: ChanSigner, #[cfg(test)] pub(super) local_keys: ChanSigner, shutdown_pubkey: PublicKey, + destination_script: Script, // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction // generation start at 0 and count up...this simplifies some parts of implementation at the @@ -269,7 +273,7 @@ pub(super) struct Channel { monitor_pending_funding_locked: bool, monitor_pending_revoke_and_ack: bool, monitor_pending_commitment_signed: bool, - monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, + monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, // pending_update_fee is filled when sending and receiving update_fee @@ -303,6 +307,8 @@ pub(super) struct Channel { last_sent_closing_fee: Option<(u64, u64, Signature)>, // (feerate, fee, our_sig) + funding_txo: Option, + /// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this /// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full /// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we @@ -347,7 +353,10 @@ pub(super) struct Channel { their_shutdown_scriptpubkey: Option