X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=64c7de855fe803b9b74032959669ce348189f6e3;hb=077b8e8c32cf332eccbe8da2b4ca377b45e20fef;hp=c71013f950d9509f318ee21fd851ab582f3d5644;hpb=f1eb4639f860bec63d3e6a311a9128d56d9b2803;p=rust-lightning diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index c71013f9..64c7de85 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -12,8 +12,7 @@ use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::Transaction; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; -use bitcoin::network::serialize::BitcoinHash; -use bitcoin::util::hash::Sha256dHash; +use bitcoin::util::hash::{BitcoinHash, Sha256dHash}; use secp256k1::key::{SecretKey,PublicKey}; use secp256k1::{Secp256k1,Message}; @@ -23,14 +22,15 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; -use ln::channelmonitor::{ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY}; use ln::router::{Route,RouteHop}; use ln::msgs; -use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder}; +use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError}; use chain::keysinterface::KeysInterface; +use util::config::UserConfig; use util::{byte_utils, events, internal_traits, rng}; use util::sha2::Sha256; -use util::ser::{Readable, Writeable}; +use util::ser::{Readable, ReadableArgs, Writeable, Writer}; use util::chacha20poly1305rfc::ChaCha20; use util::logger::Logger; use util::errors::APIError; @@ -41,11 +41,10 @@ use crypto::hmac::Hmac; use crypto::digest::Digest; use crypto::symmetriccipher::SynchronousStreamCipher; -use std::{ptr, mem}; -use std::collections::HashMap; -use std::collections::hash_map; +use std::{cmp, ptr, mem}; +use std::collections::{HashMap, hash_map, HashSet}; use std::io::Cursor; -use std::sync::{Mutex,MutexGuard,Arc}; +use std::sync::{Arc, Mutex, MutexGuard, RwLock}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::{Instant,Duration}; @@ -64,15 +63,15 @@ use std::time::{Instant,Duration}; mod channel_held_info { use ln::msgs; use ln::router::Route; + use ln::channelmanager::PaymentHash; use secp256k1::key::SecretKey; - use secp256k1::ecdh::SharedSecret; /// Stores the info we will need to send when we want to forward an HTLC onwards #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug pub struct PendingForwardHTLCInfo { pub(super) onion_packet: Option, - pub(super) incoming_shared_secret: SharedSecret, - pub(super) payment_hash: [u8; 32], + pub(super) incoming_shared_secret: [u8; 32], + pub(super) payment_hash: PaymentHash, pub(super) short_channel_id: u64, pub(super) amt_to_forward: u64, pub(super) outgoing_cltv_value: u32, @@ -92,15 +91,15 @@ mod channel_held_info { } /// Tracks the inbound corresponding to an outbound HTLC - #[derive(Clone)] + #[derive(Clone, PartialEq)] pub struct HTLCPreviousHopData { pub(super) short_channel_id: u64, pub(super) htlc_id: u64, - pub(super) incoming_packet_shared_secret: SharedSecret, + pub(super) incoming_packet_shared_secret: [u8; 32], } /// Tracks the inbound corresponding to an outbound HTLC - #[derive(Clone)] + #[derive(Clone, 
PartialEq)] pub enum HTLCSource { PreviousHopData(HTLCPreviousHopData), OutboundRoute { @@ -135,9 +134,24 @@ mod channel_held_info { } pub(super) use self::channel_held_info::*; +/// payment_hash type, use to cross-lock hop +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct PaymentHash(pub [u8;32]); +/// payment_preimage type, use to route payment between hop +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct PaymentPreimage(pub [u8;32]); + +type ShutdownResult = (Vec, Vec<(HTLCSource, PaymentHash)>); + +/// Error type returned across the channel_state mutex boundary. When an Err is generated for a +/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel +/// immediately (ie with no further calls on it made). Thus, this step happens inside a +/// channel_state lock. We then return the set of things that need to be done outside the lock in +/// this struct and call handle_error!() on it. + struct MsgHandleErrInternal { err: msgs::HandleError, - needs_channel_force_close: bool, + shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { #[inline] @@ -152,11 +166,15 @@ impl MsgHandleErrInternal { }, }), }, - needs_channel_force_close: false, + shutdown_finish: None, } } #[inline] - fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self { + fn from_no_close(err: msgs::HandleError) -> Self { + Self { err, shutdown_finish: None } + } + #[inline] + fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { Self { err: HandleError { err, @@ -167,18 +185,10 @@ impl MsgHandleErrInternal { }, }), }, - needs_channel_force_close: true, + shutdown_finish: Some((shutdown_res, channel_update)), } } #[inline] - fn from_maybe_close(err: msgs::HandleError) -> Self { - Self { err, needs_channel_force_close: true } - } - #[inline] - fn from_no_close(err: msgs::HandleError) -> Self { - Self { err, needs_channel_force_close: false } - } - #[inline] fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { @@ -196,28 +206,7 @@ impl MsgHandleErrInternal { }), }, }, - needs_channel_force_close: false, - } - } - #[inline] - fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self { - Self { - err: match err { - ChannelError::Ignore(msg) => HandleError { - err: msg, - action: Some(msgs::ErrorAction::IgnoreError), - }, - ChannelError::Close(msg) => HandleError { - err: msg, - action: Some(msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { - channel_id, - data: msg.to_string() - }, - }), - }, - }, - needs_channel_force_close: true, + shutdown_finish: None, } } } @@ -244,6 +233,18 @@ struct HTLCForwardInfo { forward_info: PendingForwardHTLCInfo, } +/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should +/// be sent in the order they appear in the return value, however sometimes the order needs to be +/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order +/// they were originally sent). In those cases, this enum is also returned. 
+#[derive(Clone, PartialEq)] +pub(super) enum RAACommitmentOrder { + /// Send the CommitmentUpdate messages first + CommitmentFirst, + /// Send the RevokeAndACK message first + RevokeAndACKFirst, +} + struct ChannelHolder { by_id: HashMap<[u8; 32], Channel>, short_to_id: HashMap, @@ -256,14 +257,18 @@ struct ChannelHolder { /// Note that while this is held in the same mutex as the channels themselves, no consistency /// guarantees are made about the channels given here actually existing anymore by the time you /// go to read them! - claimable_htlcs: HashMap<[u8; 32], Vec>, + claimable_htlcs: HashMap>, + /// Messages to send to peers - pushed to in the same lock that they are generated in (except + /// for broadcast messages, where ordering isn't as strict). + pending_msg_events: Vec, } struct MutChannelHolder<'a> { by_id: &'a mut HashMap<[u8; 32], Channel>, short_to_id: &'a mut HashMap, next_forward: &'a mut Instant, forward_htlcs: &'a mut HashMap>, - claimable_htlcs: &'a mut HashMap<[u8; 32], Vec>, + claimable_htlcs: &'a mut HashMap>, + pending_msg_events: &'a mut Vec, } impl ChannelHolder { fn borrow_parts(&mut self) -> MutChannelHolder { @@ -273,6 +278,7 @@ impl ChannelHolder { next_forward: &mut self.next_forward, forward_htlcs: &mut self.forward_htlcs, claimable_htlcs: &mut self.claimable_htlcs, + pending_msg_events: &mut self.pending_msg_events, } } } @@ -285,22 +291,45 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum /// /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through /// to individual Channels. +/// +/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for +/// all peers during write/read (though does not modify this instance, only the instance being +/// serialized). This will result in any channels which have not yet exchanged funding_created (ie +/// called funding_transaction_generated for outbound channels). +/// +/// Note that you can be a bit lazier about writing out ChannelManager than you can be with +/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before +/// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates +/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during +/// the serialization process). If the deserialized version is out-of-date compared to the +/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the +/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees). +/// +/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which +/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along +/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call +/// block_connected() to step towards your best block) upon deserialization before using the +/// object! pub struct ChannelManager { + default_configuration: UserConfig, genesis_hash: Sha256dHash, fee_estimator: Arc, monitor: Arc, chain_monitor: Arc, tx_broadcaster: Arc, - announce_channels_publicly: bool, - fee_proportional_millionths: u32, latest_block_height: AtomicUsize, + last_block_hash: Mutex, secp_ctx: Secp256k1, channel_state: Mutex, our_network_key: SecretKey, pending_events: Mutex>, + /// Used when we have to take a BIG lock to make sure everything is self-consistent. 
+ /// Essentially just when we're serializing ourselves out. + /// Taken first everywhere where we are making changes before any other locks. + total_consistency_lock: RwLock<()>, keys_manager: Arc, @@ -312,16 +341,17 @@ pub struct ChannelManager { /// ie the node we forwarded the payment on to should always have enough room to reliably time out /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the /// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more). -const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO? +const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO? const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO? -// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that -// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have -// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the -// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC. +// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS + +// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within +// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it +// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel +// on-chain to time out the HTLC. #[deny(const_err)] #[allow(dead_code)] -const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER; +const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY; // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See // ChannelMontior::would_broadcast_at_height for a description of why this is needed. @@ -366,29 +396,141 @@ pub struct ChannelDetails { pub user_id: u64, } +macro_rules! handle_error { + ($self: ident, $internal: expr, $their_node_id: expr) => { + match $internal { + Ok(msg) => Ok(msg), + Err(MsgHandleErrInternal { err, shutdown_finish }) => { + if let Some((shutdown_res, update_option)) = shutdown_finish { + $self.finish_force_close_channel(shutdown_res); + if let Some(update) = update_option { + let mut channel_state = $self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + } + Err(err) + }, + } + } +} + +macro_rules! break_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg); + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! 
try_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg); + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! return_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new()) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => { + if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); } + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + // TODO: $failed_fails is dropped here, which will cause other channels to hit the + // chain in a confused state! We need to move them into the ChannelMonitor which + // will be responsible for failing backwards once things confirm on-chain. + // It's ok that we drop $failed_forwards here - at this point we'd rather they + // broadcast HTLC-Timeout and pay the associated fees to get their funds back than + // us bother trying to claim it just to forward on to another peer. If we're + // splitting hairs we'd prefer to claim payments that were to us, but we haven't + // given up the preimage yet, so might as well just wait until the payment is + // retried, avoiding the on-chain fees. + return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs); + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())); + }, + } + } +} + +// Does not break in case of TemporaryFailure! +macro_rules! 
maybe_break_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false); + }, + } + } +} + impl ChannelManager { /// Constructs a new ChannelManager to hold several channels and route between them. /// /// This is the main "logic hub" for all channel-related actions, and implements /// ChannelMessageHandler. /// - /// fee_proportional_millionths is an optional fee to charge any payments routed through us. /// Non-proportional fees are fixed according to our risk using the provided fee estimator. /// /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`! - pub fn new(fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc, monitor: Arc, chain_monitor: Arc, tx_broadcaster: Arc, logger: Arc, keys_manager: Arc) -> Result, secp256k1::Error> { + pub fn new(network: Network, feeest: Arc, monitor: Arc, chain_monitor: Arc, tx_broadcaster: Arc, logger: Arc,keys_manager: Arc, config: UserConfig) -> Result, secp256k1::Error> { let secp_ctx = Secp256k1::new(); let res = Arc::new(ChannelManager { + default_configuration: config.clone(), genesis_hash: genesis_block(network).header.bitcoin_hash(), fee_estimator: feeest.clone(), monitor: monitor.clone(), chain_monitor, tx_broadcaster, - announce_channels_publicly, - fee_proportional_millionths, - latest_block_height: AtomicUsize::new(0), //TODO: Get an init value (generally need to replay recent chain on chain_monitor registration) + latest_block_height: AtomicUsize::new(0), //TODO: Get an init value + last_block_hash: Mutex::new(Default::default()), secp_ctx, channel_state: Mutex::new(ChannelHolder{ @@ -397,10 +539,12 @@ impl ChannelManager { next_forward: Instant::now(), forward_htlcs: HashMap::new(), claimable_htlcs: HashMap::new(), + pending_msg_events: Vec::new(), }), our_network_key: keys_manager.get_node_secret(), pending_events: Mutex::new(Vec::new()), + total_consistency_lock: RwLock::new(()), keys_manager, @@ -418,13 +562,20 @@ impl ChannelManager { /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you /// may wish to avoid using 0 for user_id here. /// - /// If successful, will generate a SendOpenChannel event, so you should probably poll + /// If successful, will generate a SendOpenChannel message event, so you should probably poll /// PeerManager::process_events afterwards. /// - /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat being greater than channel_value_satoshis * 1k + /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is + /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000. 
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> { - let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?; + if channel_value_satoshis < 1000 { + return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" }); + } + + let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?; let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator); + + let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.entry(channel.channel_id()) { hash_map::Entry::Occupied(_) => { @@ -436,9 +587,7 @@ impl ChannelManager { }, hash_map::Entry::Vacant(entry) => { entry.insert(channel); } } - - let mut events = self.pending_events.lock().unwrap(); - events.push(events::Event::SendOpenChannel { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg: res, }); @@ -488,27 +637,32 @@ impl ChannelManager { /// will be accepted on the given channel, and after additional timeout/the closing of all /// pending HTLCs, the channel will be closed on chain. /// - /// May generate a SendShutdown event on success, which should be relayed. + /// May generate a SendShutdown message event on success, which should be relayed. pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { - let (mut res, node_id, chan_option) = { + let _ = self.total_consistency_lock.read().unwrap(); + + let (mut failed_htlcs, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); match channel_state.by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - let res = chan_entry.get_mut().get_shutdown()?; + let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?; + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: chan_entry.get().get_their_node_id(), + msg: shutdown_msg + }); if chan_entry.get().is_shutdown() { if let Some(short_id) = chan_entry.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - (res, chan_entry.get().get_their_node_id(), Some(chan_entry.remove_entry().1)) - } else { (res, chan_entry.get().get_their_node_id(), None) } + (failed_htlcs, Some(chan_entry.remove_entry().1)) + } else { (failed_htlcs, None) } }, hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"}) } }; - for htlc_source in res.1.drain(..) { - // unknown_next_peer...I dunno who that is anymore.... - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }); + for htlc_source in failed_htlcs.drain(..) 
{ + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } let chan_update = if let Some(chan) = chan_option { if let Ok(update) = self.get_channel_update(&chan) { @@ -516,42 +670,33 @@ impl ChannelManager { } else { None } } else { None }; - let mut events = self.pending_events.lock().unwrap(); if let Some(update) = chan_update { - events.push(events::Event::BroadcastChannelUpdate { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - events.push(events::Event::SendShutdown { - node_id, - msg: res.0 - }); Ok(()) } #[inline] - fn finish_force_close_channel(&self, shutdown_res: (Vec, Vec<(HTLCSource, [u8; 32])>)) { + fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (local_txn, mut failed_htlcs) = shutdown_res; + log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { - // unknown_next_peer...I dunno who that is anymore.... - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } for tx in local_txn { self.tx_broadcaster.broadcast_transaction(&tx); } - //TODO: We need to have a way where outbound HTLC claims can result in us claiming the - //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards). - //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and - //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after - //timeouts are hit and our claims confirm). - //TODO: In any case, we need to make sure we remove any pending htlc tracking (via - //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel } /// Force closes a channel, immediately broadcasting the latest local commitment transaction to /// the chain and rejecting new HTLCs on the given channel. 
pub fn force_close_channel(&self, channel_id: &[u8; 32]) { + let _ = self.total_consistency_lock.read().unwrap(); + let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); @@ -564,10 +709,11 @@ impl ChannelManager { return; } }; + log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown()); - let mut events = self.pending_events.lock().unwrap(); if let Ok(update) = self.get_channel_update(&chan) { - events.push(events::Event::BroadcastChannelUpdate { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -581,35 +727,9 @@ impl ChannelManager { } } - fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) { - match err { - ChannelMonitorUpdateErr::PermanentFailure => { - let mut chan = { - let channel_state = channel_state_lock.borrow_parts(); - let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); - if let Some(short_id) = chan.get_short_channel_id() { - channel_state.short_to_id.remove(&short_id); - } - chan - }; - mem::drop(channel_state_lock); - self.finish_force_close_channel(chan.force_shutdown()); - let mut events = self.pending_events.lock().unwrap(); - if let Ok(update) = self.get_channel_update(&chan) { - events.push(events::Event::BroadcastChannelUpdate { - msg: update - }); - } - }, - ChannelMonitorUpdateErr::TemporaryFailure => { - let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); - channel.monitor_update_failed(reason); - }, - } - } - #[inline] - fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) { + fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) { + assert_eq!(shared_secret.len(), 32); ({ let mut hmac = Hmac::new(Sha256::new(), &[0x72, 0x68, 0x6f]); // rho hmac.input(&shared_secret[..]); @@ -627,7 +747,8 @@ impl ChannelManager { } #[inline] - fn gen_um_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] { + fn gen_um_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] { + assert_eq!(shared_secret.len(), 32); let mut hmac = Hmac::new(Sha256::new(), &[0x75, 0x6d]); // um hmac.input(&shared_secret[..]); let mut res = [0; 32]; @@ -636,7 +757,8 @@ impl ChannelManager { } #[inline] - fn gen_ammag_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] { + fn gen_ammag_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] { + assert_eq!(shared_secret.len(), 32); let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag hmac.input(&shared_secret[..]); let mut res = [0; 32]; @@ -675,7 +797,7 @@ impl ChannelManager { let mut res = Vec::with_capacity(route.hops.len()); Self::construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _| { - let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret); + let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret[..]); res.push(OnionKeys { #[cfg(test)] @@ -748,7 +870,7 @@ impl ChannelManager { } const ZERO:[u8; 21*65] = [0; 21*65]; - fn construct_onion_packet(mut payloads: Vec, onion_keys: Vec, 
associated_data: &[u8; 32]) -> msgs::OnionPacket { + fn construct_onion_packet(mut payloads: Vec, onion_keys: Vec, associated_data: &PaymentHash) -> msgs::OnionPacket { let mut buf = Vec::with_capacity(21*65); buf.resize(21*65, 0); @@ -785,7 +907,7 @@ impl ChannelManager { let mut hmac = Hmac::new(Sha256::new(), &keys.mu); hmac.input(&packet_data); - hmac.input(&associated_data[..]); + hmac.input(&associated_data.0[..]); hmac.raw_result(&mut hmac_res); } @@ -799,7 +921,7 @@ impl ChannelManager { /// Encrypts a failure packet. raw_packet can either be a /// msgs::DecodedOnionErrorPacket.encode() result or a msgs::OnionErrorPacket.data element. - fn encrypt_failure_packet(shared_secret: &SharedSecret, raw_packet: &[u8]) -> msgs::OnionErrorPacket { + fn encrypt_failure_packet(shared_secret: &[u8], raw_packet: &[u8]) -> msgs::OnionErrorPacket { let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); let mut packet_crypted = Vec::with_capacity(raw_packet.len()); @@ -811,7 +933,8 @@ impl ChannelManager { } } - fn build_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket { + fn build_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket { + assert_eq!(shared_secret.len(), 32); assert!(failure_data.len() <= 256 - 2); let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); @@ -842,7 +965,7 @@ impl ChannelManager { } #[inline] - fn build_first_hop_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket { + fn build_first_hop_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket { let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data); ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..]) } @@ -870,7 +993,11 @@ impl ChannelManager { })), self.channel_state.lock().unwrap()); } - let shared_secret = SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key); + let shared_secret = { + let mut arr = [0; 32]; + arr.copy_from_slice(&SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]); + arr + }; let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret); let mut channel_state = None; @@ -902,7 +1029,7 @@ impl ChannelManager { let mut hmac = Hmac::new(Sha256::new(), &mu); hmac.input(&msg.onion_routing_packet.hop_data); - hmac.input(&msg.payment_hash); + hmac.input(&msg.payment_hash.0[..]); if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) { return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!()); } @@ -947,7 +1074,7 @@ impl ChannelManager { onion_packet: None, payment_hash: msg.payment_hash.clone(), short_channel_id: 0, - incoming_shared_secret: shared_secret.clone(), + incoming_shared_secret: shared_secret, amt_to_forward: next_hop_data.data.amt_to_forward, outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, }) @@ -961,7 +1088,7 @@ impl ChannelManager { let blinding_factor = { let mut sha = Sha256::new(); sha.input(&new_pubkey.serialize()[..]); - sha.input(&shared_secret[..]); + sha.input(&shared_secret); let mut res = [0u8; 32]; sha.result(&mut res); match SecretKey::from_slice(&self.secp_ctx, &res) { @@ -987,7 +1114,7 @@ impl ChannelManager { onion_packet: Some(outgoing_packet), payment_hash: 
msg.payment_hash.clone(), short_channel_id: next_hop_data.data.short_channel_id, - incoming_shared_secret: shared_secret.clone(), + incoming_shared_secret: shared_secret, amt_to_forward: next_hop_data.data.amt_to_forward, outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, }) @@ -1017,7 +1144,7 @@ impl ChannelManager { if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap()))); } - let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); + let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap()))); } @@ -1071,7 +1198,7 @@ impl ChannelManager { cltv_expiry_delta: CLTV_EXPIRY_DELTA, htlc_minimum_msat: chan.get_our_htlc_minimum_msat(), fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator), - fee_proportional_millionths: self.fee_proportional_millionths, + fee_proportional_millionths: chan.get_fee_proportional_millionths(), excess_data: Vec::new(), }; @@ -1096,11 +1223,21 @@ impl ChannelManager { /// payment_preimage tracking (which you should already be doing as they represent "proof of /// payment") and prevent double-sends yourself. /// - /// May generate a SendHTLCs event on success, which should be relayed. + /// May generate a SendHTLCs message event on success, which should be relayed. /// /// Raises APIError::RouteError when an invalid route or forward parameter - /// (cltv_delta, fee, node public key) is specified - pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> { + /// (cltv_delta, fee, node public key) is specified. + /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates + /// (including due to previous monitor update failure or new permanent monitor update failure). + /// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the + /// relevant updates. + /// + /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed + /// and you may wish to retry via a different route immediately. + /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably + /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry + /// the payment via a different route unless you intend to pay twice! 
+ pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> { if route.hops.len() < 1 || route.hops.len() > 20 { return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"}); } @@ -1111,11 +1248,7 @@ impl ChannelManager { } } - let session_priv = SecretKey::from_slice(&self.secp_ctx, &{ - let mut session_key = [0; 32]; - rng::fill_bytes(&mut session_key); - session_key - }).expect("RNG is bad!"); + let session_priv = self.keys_manager.get_session_key(); let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; @@ -1124,105 +1257,115 @@ impl ChannelManager { let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?; let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash); - let (first_hop_node_id, update_add, commitment_signed) = { - let mut channel_state = self.channel_state.lock().unwrap(); + let _ = self.total_consistency_lock.read().unwrap(); + + let err: Result<(), _> = loop { + let mut channel_lock = self.channel_state.lock().unwrap(); - let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { + let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), Some(id) => id.clone(), }; - let res = { - let res = { - let chan = channel_state.by_id.get_mut(&id).unwrap(); - if chan.get_their_node_id() != route.hops.first().unwrap().pubkey { + let channel_state = channel_lock.borrow_parts(); + if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { + match { + if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey { return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); } - if chan.is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.is_live() { - return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"}); + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}); } - chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { route: route.clone(), session_priv: session_priv.clone(), first_hop_htlc_msat: htlc_msat, - }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})? - }; - match res { + }, onion_packet), channel_state, chan) + } { Some((update_add, commitment_signed, chan_monitor)) => { if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst); + maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst); + // Note that MonitorUpdateFailed here indicates (per function docs) + // that we will resend the commitment update once we unfreeze monitor + // updating, so we have to take special care that we don't return + // something else in case we will resend later! 
return Err(APIError::MonitorUpdateFailed); } - Some((update_add, commitment_signed)) + + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: route.hops.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); }, - None => None, + None => {}, } - }; - - let first_hop_node_id = route.hops.first().unwrap().pubkey; - - match res { - Some((update_add, commitment_signed)) => { - (first_hop_node_id, update_add, commitment_signed) - }, - None => return Ok(()), - } + } else { unreachable!(); } + return Ok(()); }; - let mut events = self.pending_events.lock().unwrap(); - events.push(events::Event::UpdateHTLCs { - node_id: first_hop_node_id, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, + match handle_error!(self, err, route.hops.first().unwrap().pubkey) { + Ok(_) => unreachable!(), + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + log_error!(self, "Got bad keys: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: route.hops.first().unwrap().pubkey, + action: e.action, + }); + } + Err(APIError::ChannelUnavailable { err: e.err }) }, - }); - Ok(()) + } } /// Call this upon creation of a funding transaction for the given channel. /// + /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs + /// or your counterparty can steal your funds! + /// /// Panics if a funding transaction has already been provided for this channel. /// /// May panic if the funding_txo is duplicative with some other channel (note that this should /// be trivially prevented by using unique funding transaction keys per-channel). pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) { - macro_rules! 
add_pending_event { - ($event: expr) => { - { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push($event); - } - } - } + let _ = self.total_consistency_lock.read().unwrap(); let (chan, msg, chan_monitor) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.remove(temporary_channel_id) { - Some(mut chan) => { - match chan.get_outbound_funding_created(funding_txo) { - Ok(funding_msg) => { - (chan, funding_msg.0, funding_msg.1) - }, - Err(e) => { - log_error!(self, "Got bad signatures: {}!", e.err); - mem::drop(channel_state); - add_pending_event!(events::Event::HandleError { - node_id: chan.get_their_node_id(), - action: e.action, - }); - return; - }, - } + let (res, chan) = { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.remove(temporary_channel_id) { + Some(mut chan) => { + (chan.get_outbound_funding_created(funding_txo) + .map_err(|e| if let ChannelError::Close(msg) = e { + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None) + } else { unreachable!(); }) + , chan) + }, + None => return + } + }; + match handle_error!(self, res, chan.get_their_node_id()) { + Ok(funding_msg) => { + (chan, funding_msg.0, funding_msg.1) + }, + Err(e) => { + log_error!(self, "Got bad signatures: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: chan.get_their_node_id(), + action: e.action, + }); + return; }, - None => return } }; // Because we have exclusive ownership of the channel here we can release the channel_state @@ -1230,12 +1373,12 @@ impl ChannelManager { if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { unimplemented!(); } - add_pending_event!(events::Event::SendFundingCreated { + + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id: chan.get_their_node_id(), msg: msg, }); - - let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.entry(chan.channel_id()) { hash_map::Entry::Occupied(_) => { panic!("Generated duplicate funding txid?"); @@ -1269,6 +1412,8 @@ impl ChannelManager { /// Should only really ever be called in response to an PendingHTLCsForwardable event. /// Will likely generate further events. pub fn process_pending_htlc_forwards(&self) { + let _ = self.total_consistency_lock.read().unwrap(); + let mut new_events = Vec::new(); let mut failed_forwards = Vec::new(); { @@ -1332,9 +1477,7 @@ impl ChannelManager { let (commitment_msg, monitor) = match forward_chan.send_commitment() { Ok(res) => res, Err(e) => { - if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action { - } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action { - } else { + if let ChannelError::Ignore(_) = e { panic!("Stated return value requirements in send_commitment() were not met"); } //TODO: Handle...this is bad! @@ -1342,9 +1485,9 @@ impl ChannelManager { }, }; if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!();// but def dont push the event... 
+ unimplemented!(); } - new_events.push(events::Event::UpdateHTLCs { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: forward_chan.get_their_node_id(), updates: msgs::CommitmentUpdate { update_add_htlcs: add_htlc_msgs, @@ -1389,7 +1532,9 @@ impl ChannelManager { } /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event. - pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool { + pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, reason: PaymentFailReason) -> bool { + let _ = self.total_consistency_lock.read().unwrap(); + let mut channel_state = Some(self.channel_state.lock().unwrap()); let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash); if let Some(mut sources) = removed_source { @@ -1407,66 +1552,60 @@ impl ChannelManager { /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { match source { HTLCSource::OutboundRoute { .. } => { - mem::drop(channel_state); + log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); + mem::drop(channel_state_lock); if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error { let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone()); - let mut pending_events = self.pending_events.lock().unwrap(); - if let Some(channel_update) = channel_update { - pending_events.push(events::Event::PaymentFailureNetworkUpdate { - update: channel_update, - }); + if let Some(update) = channel_update { + self.channel_state.lock().unwrap().pending_msg_events.push( + events::MessageSendEvent::PaymentFailureNetworkUpdate { + update, + } + ); } - pending_events.push(events::Event::PaymentFailed { + self.pending_events.lock().unwrap().push(events::Event::PaymentFailed { payment_hash: payment_hash.clone(), rejected_by_dest: !payment_retryable, }); } else { - panic!("should have onion error packet here"); + //TODO: Pass this back (see GH #243) + self.pending_events.lock().unwrap().push(events::Event::PaymentFailed { + payment_hash: payment_hash.clone(), + rejected_by_dest: false, // We failed it ourselves, can't blame them + }); } }, HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => { let err_packet = match onion_error { HTLCFailReason::Reason { failure_code, data } => { + log_trace!(self, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode(); ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet) }, HTLCFailReason::ErrorPacket { err } => { + log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built ErrorPacket", log_bytes!(payment_hash.0)); ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data) } }; - let (node_id, fail_msgs) = { - let chan_id = 
match channel_state.short_to_id.get(&short_channel_id) { - Some(chan_id) => chan_id.clone(), - None => return - }; + let channel_state = channel_state_lock.borrow_parts(); - let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); - match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) { - Ok(Some((msg, commitment_msg, chan_monitor))) => { - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } - (chan.get_their_node_id(), Some((msg, commitment_msg))) - }, - Ok(None) => (chan.get_their_node_id(), None), - Err(_e) => { - //TODO: Do something with e? - return; - }, - } + let chan_id = match channel_state.short_to_id.get(&short_channel_id) { + Some(chan_id) => chan_id.clone(), + None => return }; - match fail_msgs { - Some((msg, commitment_msg)) => { - mem::drop(channel_state); - - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::UpdateHTLCs { - node_id, + let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); + match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) { + Ok(Some((msg, commitment_msg, chan_monitor))) => { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_their_node_id(), updates: msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), update_fulfill_htlcs: Vec::new(), @@ -1477,7 +1616,11 @@ impl ChannelManager { }, }); }, - None => {}, + Ok(None) => {}, + Err(_e) => { + //TODO: Do something with e? + return; + }, } }, } @@ -1488,11 +1631,13 @@ impl ChannelManager { /// should probably kick the net layer to go send messages if this returns true! /// /// May panic if called except in response to a PaymentReceived event. - pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool { + pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool { let mut sha = Sha256::new(); - sha.input(&payment_preimage); - let mut payment_hash = [0; 32]; - sha.result(&mut payment_hash); + sha.input(&payment_preimage.0[..]); + let mut payment_hash = PaymentHash([0; 32]); + sha.result(&mut payment_hash.0[..]); + + let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = Some(self.channel_state.lock().unwrap()); let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash); @@ -1504,10 +1649,10 @@ impl ChannelManager { true } else { false } } - fn claim_funds_internal(&self, mut channel_state: MutexGuard, source: HTLCSource, payment_preimage: [u8; 32]) { + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage) { match source { HTLCSource::OutboundRoute { .. } => { - mem::drop(channel_state); + mem::drop(channel_state_lock); let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::PaymentSent { payment_preimage @@ -1515,49 +1660,46 @@ impl ChannelManager { }, HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
- let (node_id, fulfill_msgs) = { - let chan_id = match channel_state.short_to_id.get(&short_channel_id) { - Some(chan_id) => chan_id.clone(), - None => { - // TODO: There is probably a channel manager somewhere that needs to - // learn the preimage as the channel already hit the chain and that's - // why its missing. - return - } - }; + let channel_state = channel_state_lock.borrow_parts(); + + let chan_id = match channel_state.short_to_id.get(&short_channel_id) { + Some(chan_id) => chan_id.clone(), + None => { + // TODO: There is probably a channel manager somewhere that needs to + // learn the preimage as the channel already hit the chain and that's + // why its missing. + return + } + }; - let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); - match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { - Ok((msgs, Some(chan_monitor))) => { + let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); + match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { + Ok((msgs, monitor_option)) => { + if let Some(chan_monitor) = monitor_option { if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { unimplemented!();// but def dont push the event... } - (chan.get_their_node_id(), msgs) - }, - Ok((msgs, None)) => (chan.get_their_node_id(), msgs), - Err(_e) => { - // TODO: There is probably a channel manager somewhere that needs to - // learn the preimage as the channel may be about to hit the chain. - //TODO: Do something with e? - return - }, - } - }; - - mem::drop(channel_state); - if let Some((msg, commitment_msg)) = fulfill_msgs { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::UpdateHTLCs { - node_id: node_id, - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, } - }); + if let Some((msg, commitment_signed)) = msgs { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + }, + Err(_e) => { + // TODO: There is probably a channel manager somewhere that needs to + // learn the preimage as the channel may be about to hit the chain. + //TODO: Do something with e? + return + }, } }, } @@ -1572,27 +1714,35 @@ impl ChannelManager { /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update /// operation. 
pub fn test_restore_channel_monitor(&self) { - let mut new_events = Vec::new(); let mut close_results = Vec::new(); let mut htlc_forwards = Vec::new(); let mut htlc_failures = Vec::new(); + let _ = self.total_consistency_lock.read().unwrap(); { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = channel_lock.borrow_parts(); let short_to_id = channel_state.short_to_id; + let pending_msg_events = channel_state.pending_msg_events; channel_state.by_id.retain(|_, channel| { if channel.is_awaiting_monitor_update() { let chan_monitor = channel.channel_monitor(); if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { + // TODO: There may be some pending HTLCs that we intended to fail + // backwards when a monitor update failed. We should make sure + // knowledge of those gets moved into the appropriate in-memory + // ChannelMonitor and they get failed backwards once we get + // on-chain confirmations. + // Note I think #198 addresses this, so once its merged a test + // should be written. if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } close_results.push(channel.force_shutdown()); if let Ok(update) = self.get_channel_update(&channel) { - new_events.push(events::Event::BroadcastChannelUpdate { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -1609,7 +1759,7 @@ impl ChannelManager { macro_rules! handle_cs { () => { if let Some(update) = commitment_update { - new_events.push(events::Event::UpdateHTLCs { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: channel.get_their_node_id(), updates: update, }); @@ -1617,7 +1767,7 @@ impl ChannelManager { } } macro_rules! handle_raa { () => { if let Some(revoke_and_ack) = raa { - new_events.push(events::Event::SendRevokeAndACK { + pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: channel.get_their_node_id(), msg: revoke_and_ack, }); @@ -1647,41 +1797,45 @@ impl ChannelManager { for res in close_results.drain(..) 
{ self.finish_force_close_channel(res); } - - self.pending_events.lock().unwrap().append(&mut new_events); } - fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result { + fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { if msg.chain_hash != self.genesis_hash { return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone())); } - let mut channel_state = self.channel_state.lock().unwrap(); - if channel_state.by_id.contains_key(&msg.temporary_channel_id) { - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())); - } - let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)) + let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration) .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; - let accept_msg = channel.get_accept_channel(); - channel_state.by_id.insert(channel.channel_id(), channel); - Ok(accept_msg) + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + match channel_state.by_id.entry(channel.channel_id()) { + hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())), + hash_map::Entry::Vacant(entry) => { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: their_node_id.clone(), + msg: channel.get_accept_channel(), + }); + entry.insert(channel); + } + } + Ok(()) } fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { let (value, output_script, user_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.temporary_channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - chan.accept_channel(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?; - (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id()) + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan); + (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, //TODO: same as above - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } }; let mut pending_events = 
self.pending_events.lock().unwrap(); @@ -1694,23 +1848,17 @@ impl ChannelManager { Ok(()) } - fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result { - let (chan, funding_msg, monitor_update) = { - let mut channel_state = self.channel_state.lock().unwrap(); + fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { + let ((funding_msg, monitor_update), chan) = { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - match chan.get_mut().funding_created(msg) { - Ok((funding_msg, monitor_update)) => { - (chan.remove(), funding_msg, monitor_update) - }, - Err(e) => { - return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) - } - } + (try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } @@ -1720,34 +1868,40 @@ impl ChannelManager { if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) { unimplemented!(); } - let mut channel_state = self.channel_state.lock().unwrap(); + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); match channel_state.by_id.entry(funding_msg.channel_id) { hash_map::Entry::Occupied(_) => { return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id)) }, hash_map::Entry::Vacant(e) => { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + node_id: their_node_id.clone(), + msg: funding_msg, + }); e.insert(chan); } } - Ok(funding_msg) + Ok(()) } fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let (funding_txo, user_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan); if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { unimplemented!(); } - (chan.get_funding_txo().unwrap(), chan.get_user_id()) + (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id()) }, - None => return 
Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -1758,24 +1912,30 @@ impl ChannelManager { Ok(()) } - fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result, MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.funding_locked(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; - return Ok(self.get_announcement_sigs(chan)); - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - }; + try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan); + if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: their_node_id.clone(), + msg: announcement_sigs, + }); + } + Ok(()) + }, + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } } - fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option, Option), MsgHandleErrInternal> { - let (mut res, chan_option) = { + fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { + let (mut dropped_htlcs, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); @@ -1785,34 +1945,45 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let res = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry); + if let Some(msg) = shutdown { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: their_node_id.clone(), + msg, + }); + } + if let Some(msg) = closing_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: their_node_id.clone(), + msg, + }); + } if chan_entry.get().is_shutdown() { if let Some(short_id) = chan_entry.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - (res, Some(chan_entry.remove_entry().1)) - } else { (res, None) } + 
(dropped_htlcs, Some(chan_entry.remove_entry().1)) + } else { (dropped_htlcs, None) } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - for htlc_source in res.2.drain(..) { - // unknown_next_peer...I dunno who that is anymore.... - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }); + for htlc_source in dropped_htlcs.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } if let Some(chan) = chan_option { if let Ok(update) = self.get_channel_update(&chan) { - let mut events = self.pending_events.lock().unwrap(); - events.push(events::Event::BroadcastChannelUpdate { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } } - Ok((res.0, res.1)) + Ok(()) } - fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result, MsgHandleErrInternal> { - let (res, chan_option) = { + fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> { + let (tx, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); match channel_state.by_id.entry(msg.channel_id.clone()) { @@ -1821,8 +1992,14 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let res = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; - if res.1.is_some() { + let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry); + if let Some(msg) = closing_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: their_node_id.clone(), + msg, + }); + } + if tx.is_some() { // We're done with this channel, we've got a signed closing transaction and // will send the closing_signed back to the remote peer upon return. 
This // also implies there are no pending HTLCs left on the channel, so we can @@ -1831,24 +2008,24 @@ impl ChannelManager { if let Some(short_id) = chan_entry.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - (res, Some(chan_entry.remove_entry().1)) - } else { (res, None) } + (tx, Some(chan_entry.remove_entry().1)) + } else { (tx, None) } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Some(broadcast_tx) = res.1 { + if let Some(broadcast_tx) = tx { self.tx_broadcaster.broadcast_transaction(&broadcast_tx); } if let Some(chan) = chan_option { if let Ok(update) = self.get_channel_update(&chan) { - let mut events = self.pending_events.lock().unwrap(); - events.push(events::Event::BroadcastChannelUpdate { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } } - Ok(res.0) + Ok(()) } fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> { @@ -1861,38 +2038,60 @@ impl ChannelManager { //encrypted with the same key. Its not immediately obvious how to usefully exploit that, //but we should prevent it anyway. - let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); + let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if !chan.is_usable() { - return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Channel not yet available for receiving HTLCs", action: Some(msgs::ErrorAction::IgnoreError)})); + if !chan.get().is_usable() { + // If the update_add is completely bogus, the call will Err and we will close, + // but if we've sent a shutdown and they haven't acknowledged it yet, we just + // want to reject the new HTLC and fail it backwards instead of forwarding. + if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info { + let chan_update = self.get_channel_update(chan.get()); + pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + reason: if let Ok(update) = chan_update { + ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &update.encode_with_len()[..]) + } else { + // This can only happen if the channel isn't in the fully-funded + // state yet, implying our counterparty is trying to route payments + // over the channel back to themselves (cause no one else should + // know the short_id is a lightning channel yet). 
We should have no + // problem just calling this unknown_next_peer + ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[]) + }, + })); + } } - chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } + Ok(()) } fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - chan.update_fulfill_htlc(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone() - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + let mut channel_lock = self.channel_state.lock().unwrap(); + let htlc_source = { + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { + //TODO: here and below MsgHandleErrInternal, #153 case + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } }; - self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone()); + self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone()); Ok(()) } @@ -1924,7 +2123,7 @@ impl ChannelManager { let amt_to_forward = htlc_msat - route_hop.fee_msat; htlc_msat = amt_to_forward; - let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); + let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret[..]); let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); decryption_tmp.resize(packet_decrypted.len(), 0); @@ -1935,7 +2134,7 @@ impl ChannelManager { let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey; if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) { - let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); + let um = ChannelManager::gen_um_from_shared_secret(&shared_secret[..]); let mut hmac = Hmac::new(Sha256::new(), &um); hmac.input(&err_packet.encode()[32..]); let mut calc_tag = [0u8; 32]; @@ -2094,59 +2293,82 @@ impl ChannelManager { } fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if 
chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - }?; + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } Ok(()) } fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if (msg.failure_code & 0x8000) != 0 { - return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION set", msg.channel_id)); + if (msg.failure_code & 0x8000) == 0 { + try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan); } - chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); Ok(()) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } - fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option), MsgHandleErrInternal> { - let (revoke_and_ack, commitment_signed) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| 
MsgHandleErrInternal::from_maybe_close(e))?; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } - (revoke_and_ack, commitment_signed) - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - } - }; - Ok((revoke_and_ack, commitment_signed)) + fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { + //TODO: here and below MsgHandleErrInternal, #153 case + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = + try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some()); + //TODO: Rebroadcast closing_signed if present on monitor update restoration + } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + node_id: their_node_id.clone(), + msg: revoke_and_ack, + }); + if let Some(msg) = commitment_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: their_node_id.clone(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: msg, + }, + }); + } + if let Some(msg) = closing_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: their_node_id.clone(), + msg, + }); + } + Ok(()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } } #[inline] @@ -2182,22 +2404,36 @@ impl ChannelManager { } } - fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result, MsgHandleErrInternal> { - let ((res, pending_forwards, mut pending_failures), short_channel_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { + let (pending_forwards, mut pending_failures, short_channel_id) = { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (res, pending_forwards, pending_failures, chan_monitor) = 
chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = + try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures); + } + if let Some(updates) = commitment_update { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: their_node_id.clone(), + updates, + }); + } + if let Some(msg) = closing_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: their_node_id.clone(), + msg, + }); } - ((res, pending_forwards, pending_failures), chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel")) + (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; for failure in pending_failures.drain(..) { @@ -2205,85 +2441,135 @@ impl ChannelManager { } self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]); - Ok(res) + Ok(()) } fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } + Ok(()) } fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { - let (chan_announcement, chan_update) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - if !chan.is_usable() { - return 
Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); - } + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + if !chan.get().is_usable() { + return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); + } - let our_node_id = self.get_our_node_id(); - let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let our_node_id = self.get_our_node_id(); + let (announcement, our_bitcoin_sig) = + try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan); - let were_node_one = announcement.node_id_1 == our_node_id; - let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); - let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id); - secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action); - secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action); + let were_node_one = announcement.node_id_1 == our_node_id; + let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); + if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() || + self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() { + try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan); + } - let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key); + let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key); - (msgs::ChannelAnnouncement { + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + msg: msgs::ChannelAnnouncement { node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature }, node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig }, bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature }, bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig }, contents: announcement, - }, self.get_channel_update(chan).unwrap()) // can only fail if we're not in a ready state - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - } - }; - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::BroadcastChannelAnnouncement { msg: chan_announcement, update_msg: 
chan_update }); + }, + update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state + }); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } Ok(()) } - fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option, RAACommitmentOrder), MsgHandleErrInternal> { - let res = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; - if let Some(monitor) = channel_monitor { - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); + fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) = + try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan); + if let Some(monitor) = channel_monitor { + if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + // channel_reestablish doesn't guarantee the order it returns is sensical + // for the messages it returns, but if we're setting what messages to + // re-transmit on monitor update success, we need to make sure it is sane. + if revoke_and_ack.is_none() { + order = RAACommitmentOrder::CommitmentFirst; + } + if commitment_update.is_none() { + order = RAACommitmentOrder::RevokeAndACKFirst; } + return_monitor_err!(self, e, channel_state, chan, order); + //TODO: Resend the funding_locked if needed once we get the monitor running again } - Ok((funding_locked, revoke_and_ack, commitment_update, order)) - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - } - }; - - res + } + if let Some(msg) = funding_locked { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked { + node_id: their_node_id.clone(), + msg + }); + } + macro_rules! send_raa { () => { + if let Some(msg) = revoke_and_ack { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + node_id: their_node_id.clone(), + msg + }); + } + } } + macro_rules! 
send_cu { () => { + if let Some(updates) = commitment_update { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: their_node_id.clone(), + updates + }); + } + } } + match order { + RAACommitmentOrder::RevokeAndACKFirst => { + send_raa!(); + send_cu!(); + }, + RAACommitmentOrder::CommitmentFirst => { + send_cu!(); + send_raa!(); + }, + } + if let Some(msg) = shutdown { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: their_node_id.clone(), + msg, + }); + } + Ok(()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } } /// Begin Update fee process. Allowed only on an outbound channel. @@ -2292,46 +2578,111 @@ impl ChannelManager { /// Note: This API is likely to change! #[doc(hidden)] pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&channel_id) { - None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), - Some(chan) => { - if !chan.is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); - } - if chan.is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); - } - if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? { - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + let _ = self.total_consistency_lock.read().unwrap(); + let their_node_id; + let err: Result<(), _> = loop { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + + match channel_state.by_id.entry(channel_id) { + hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), + hash_map::Entry::Occupied(mut chan) => { + if !chan.get().is_outbound() { + return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); } - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::UpdateHTLCs { - node_id: chan.get_their_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, + if chan.get().is_awaiting_monitor_update() { + return Err(APIError::MonitorUpdateFailed); + } + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); + } + their_node_id = chan.get().get_their_node_id(); + if let Some((update_fee, commitment_signed, chan_monitor)) = + break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan) + { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: 
chan.get().get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + } + }, + } + return Ok(()) + }; + + match handle_error!(self, err, their_node_id) { + Ok(_) => unreachable!(), + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + log_error!(self, "Got bad keys: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: their_node_id, + action: e.action, }); } + Err(APIError::APIMisuseError { err: e.err }) }, } - Ok(()) + } +} + +impl events::MessageSendEventsProvider for ChannelManager { + fn get_and_clear_pending_msg_events(&self) -> Vec { + // TODO: Event release to users and serialization is currently race-y: its very easy for a + // user to serialize a ChannelManager with pending events in it and lose those events on + // restart. This is doubly true for the fail/fulfill-backs from monitor events! + { + //TODO: This behavior should be documented. + for htlc_update in self.monitor.fetch_pending_htlc_updated() { + if let Some(preimage) = htlc_update.payment_preimage { + log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + } else { + log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + } + } + } + + let mut ret = Vec::new(); + let mut channel_state = self.channel_state.lock().unwrap(); + mem::swap(&mut ret, &mut channel_state.pending_msg_events); + ret } } impl events::EventsProvider for ChannelManager { fn get_and_clear_pending_events(&self) -> Vec { - let mut pending_events = self.pending_events.lock().unwrap(); + // TODO: Event release to users and serialization is currently race-y: its very easy for a + // user to serialize a ChannelManager with pending events in it and lose those events on + // restart. This is doubly true for the fail/fulfill-backs from monitor events! + { + //TODO: This behavior should be documented. 
+ for htlc_update in self.monitor.fetch_pending_htlc_updated() { + if let Some(preimage) = htlc_update.payment_preimage { + log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + } else { + log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + } + } + } + let mut ret = Vec::new(); + let mut pending_events = self.pending_events.lock().unwrap(); mem::swap(&mut ret, &mut *pending_events); ret } @@ -2339,35 +2690,41 @@ impl events::EventsProvider for ChannelManager { impl ChainListener for ChannelManager { fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) { - let mut new_events = Vec::new(); + let header_hash = header.bitcoin_hash(); + log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len()); + let _ = self.total_consistency_lock.read().unwrap(); let mut failed_channels = Vec::new(); { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = channel_lock.borrow_parts(); let short_to_id = channel_state.short_to_id; + let pending_msg_events = channel_state.pending_msg_events; channel_state.by_id.retain(|_, channel| { let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched); if let Ok(Some(funding_locked)) = chan_res { - let announcement_sigs = self.get_announcement_sigs(channel); - new_events.push(events::Event::SendFundingLocked { + pending_msg_events.push(events::MessageSendEvent::SendFundingLocked { node_id: channel.get_their_node_id(), msg: funding_locked, - announcement_sigs: announcement_sigs }); + if let Some(announcement_sigs) = self.get_announcement_sigs(channel) { + pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: channel.get_their_node_id(), + msg: announcement_sigs, + }); + } short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id()); } else if let Err(e) = chan_res { - new_events.push(events::Event::HandleError { + pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_their_node_id(), - action: e.action, + action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }), }); - if channel.is_shutdown() { - return false; - } + return false; } if let Some(funding_txo) = channel.get_funding_txo() { for tx in txn_matched { for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { + log_trace!(self, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id())); if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } @@ -2376,7 +2733,7 @@ impl ChainListener for ChannelManager { // some kind of SPV attack, though we expect these to be dropped. 
failed_channels.push(channel.force_shutdown()); if let Ok(update) = self.get_channel_update(&channel) { - new_events.push(events::Event::BroadcastChannelUpdate { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -2395,7 +2752,7 @@ impl ChainListener for ChannelManager { // hurt anything, but does make tests a bit simpler). failed_channels.last_mut().unwrap().0 = Vec::new(); if let Ok(update) = self.get_channel_update(&channel) { - new_events.push(events::Event::BroadcastChannelUpdate { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -2407,21 +2764,19 @@ impl ChainListener for ChannelManager { for failure in failed_channels.drain(..) { self.finish_force_close_channel(failure); } - let mut pending_events = self.pending_events.lock().unwrap(); - for funding_locked in new_events.drain(..) { - pending_events.push(funding_locked); - } self.latest_block_height.store(height as usize, Ordering::Release); + *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash; } /// We force-close the channel without letting our counterparty participate in the shutdown fn block_disconnected(&self, header: &BlockHeader) { - let mut new_events = Vec::new(); + let _ = self.total_consistency_lock.read().unwrap(); let mut failed_channels = Vec::new(); { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = channel_lock.borrow_parts(); let short_to_id = channel_state.short_to_id; + let pending_msg_events = channel_state.pending_msg_events; channel_state.by_id.retain(|_, v| { if v.block_disconnected(header) { if let Some(short_id) = v.get_short_channel_id() { @@ -2429,7 +2784,7 @@ impl ChainListener for ChannelManager { } failed_channels.push(v.force_shutdown()); if let Ok(update) = self.get_channel_update(&v) { - new_events.push(events::Event::BroadcastChannelUpdate { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -2442,123 +2797,104 @@ impl ChainListener for ChannelManager { for failure in failed_channels.drain(..) { self.finish_force_close_channel(failure); } - if !new_events.is_empty() { - let mut pending_events = self.pending_events.lock().unwrap(); - for funding_locked in new_events.drain(..) { - pending_events.push(funding_locked); - } - } self.latest_block_height.fetch_sub(1, Ordering::AcqRel); - } -} - -macro_rules! 
handle_error { - ($self: ident, $internal: expr, $their_node_id: expr) => { - match $internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, needs_channel_force_close }) => { - if needs_channel_force_close { - match &err.action { - &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {}, - &Some(msgs::ErrorAction::IgnoreError) => {}, - &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &None => {}, - } - } - Err(err) - }, - } + *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash(); } } impl ChannelMessageHandler for ChannelManager { //TODO: Handle errors and close channel (or so) - fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result { + fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id) } fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id) } - fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result { + fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id) } fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id) } - fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result, HandleError> { + fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id) } - fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option, Option), HandleError> { + fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id) } - fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result, HandleError> { + fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id) } fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> { + let _ = 
self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id) } fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id) } fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id) } fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id) } - fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option), HandleError> { + fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id) } - fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result, HandleError> { + fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id) } fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id) } fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id) } - fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option, RAACommitmentOrder), HandleError> { + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> { + let _ = self.total_consistency_lock.read().unwrap(); handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id) } fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) { - let mut new_events = Vec::new(); + let _ = self.total_consistency_lock.read().unwrap(); let mut failed_channels = Vec::new(); let mut failed_payments = Vec::new(); { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); let short_to_id = channel_state.short_to_id; + let pending_msg_events = channel_state.pending_msg_events; if no_connection_possible { + log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id)); channel_state.by_id.retain(|_, chan| { if chan.get_their_node_id() == *their_node_id { if let Some(short_id) = chan.get_short_channel_id() { @@ -2566,7 +2902,7 @@ impl ChannelMessageHandler for ChannelManager { } 
failed_channels.push(chan.force_shutdown()); if let Ok(update) = self.get_channel_update(&chan) { - new_events.push(events::Event::BroadcastChannelUpdate { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -2576,6 +2912,7 @@ impl ChannelMessageHandler for ChannelManager { } }); } else { + log_debug!(self, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id)); channel_state.by_id.retain(|_, chan| { if chan.get_their_node_id() == *their_node_id { //TODO: mark channel disabled (and maybe announce such after a timeout). @@ -2598,12 +2935,6 @@ impl ChannelMessageHandler for ChannelManager { for failure in failed_channels.drain(..) { self.finish_force_close_channel(failure); } - if !new_events.is_empty() { - let mut pending_events = self.pending_events.lock().unwrap(); - for event in new_events.drain(..) { - pending_events.push(event); - } - } for (chan_update, mut htlc_sources) in failed_payments { for (htlc_source, payment_hash) in htlc_sources.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() }); @@ -2611,9 +2942,13 @@ impl ChannelMessageHandler for ChannelManager { } } - fn peer_connected(&self, their_node_id: &PublicKey) -> Vec { - let mut res = Vec::new(); - let mut channel_state = self.channel_state.lock().unwrap(); + fn peer_connected(&self, their_node_id: &PublicKey) { + log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id)); + + let _ = self.total_consistency_lock.read().unwrap(); + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); + let pending_msg_events = channel_state.pending_msg_events; channel_state.by_id.retain(|_, chan| { if chan.get_their_node_id() == *their_node_id { if !chan.have_received_message() { @@ -2623,16 +2958,20 @@ impl ChannelMessageHandler for ChannelManager { // drop it. 
false } else { - res.push(chan.get_channel_reestablish()); + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.get_their_node_id(), + msg: chan.get_channel_reestablish(), + }); true } } else { true } }); //TODO: Also re-broadcast announcement_signatures - res } fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) { + let _ = self.total_consistency_lock.read().unwrap(); + if msg.channel_id == [0; 32] { for chan in self.list_channels() { if chan.remote_network_id == *their_node_id { @@ -2645,36 +2984,424 @@ impl ChannelMessageHandler for ChannelManager { } } -#[cfg(test)] -mod tests { - use chain::chaininterface; - use chain::transaction::OutPoint; - use chain::chaininterface::ChainListener; - use chain::keysinterface::KeysInterface; - use chain::keysinterface; - use ln::channelmanager::{ChannelManager,OnionKeys,PaymentFailReason}; - use ln::channelmonitor::{ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; - use ln::router::{Route, RouteHop, Router}; - use ln::msgs; - use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; - use util::test_utils; - use util::events::{Event, EventsProvider}; - use util::errors::APIError; - use util::logger::Logger; - use util::ser::Writeable; +const SERIALIZATION_VERSION: u8 = 1; +const MIN_SERIALIZATION_VERSION: u8 = 1; - use bitcoin::util::hash::Sha256dHash; - use bitcoin::blockdata::block::{Block, BlockHeader}; - use bitcoin::blockdata::transaction::{Transaction, TxOut}; - use bitcoin::blockdata::constants::genesis_block; - use bitcoin::network::constants::Network; - use bitcoin::network::serialize::serialize; - use bitcoin::network::serialize::BitcoinHash; +impl Writeable for PendingForwardHTLCInfo { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + if let &Some(ref onion) = &self.onion_packet { + 1u8.write(writer)?; + onion.write(writer)?; + } else { + 0u8.write(writer)?; + } + self.incoming_shared_secret.write(writer)?; + self.payment_hash.write(writer)?; + self.short_channel_id.write(writer)?; + self.amt_to_forward.write(writer)?; + self.outgoing_cltv_value.write(writer)?; + Ok(()) + } +} - use hex; +impl Readable for PendingForwardHTLCInfo { + fn read(reader: &mut R) -> Result { + let onion_packet = match >::read(reader)? { + 0 => None, + 1 => Some(msgs::OnionPacket::read(reader)?), + _ => return Err(DecodeError::InvalidValue), + }; + Ok(PendingForwardHTLCInfo { + onion_packet, + incoming_shared_secret: Readable::read(reader)?, + payment_hash: Readable::read(reader)?, + short_channel_id: Readable::read(reader)?, + amt_to_forward: Readable::read(reader)?, + outgoing_cltv_value: Readable::read(reader)?, + }) + } +} - use secp256k1::{Secp256k1, Message}; - use secp256k1::key::{PublicKey,SecretKey}; +impl Writeable for HTLCFailureMsg { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + match self { + &HTLCFailureMsg::Relay(ref fail_msg) => { + 0u8.write(writer)?; + fail_msg.write(writer)?; + }, + &HTLCFailureMsg::Malformed(ref fail_msg) => { + 1u8.write(writer)?; + fail_msg.write(writer)?; + } + } + Ok(()) + } +} + +impl Readable for HTLCFailureMsg { + fn read(reader: &mut R) -> Result { + match >::read(reader)? 
{ + 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)), + 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)), + _ => Err(DecodeError::InvalidValue), + } + } +} + +impl Writeable for PendingHTLCStatus { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + match self { + &PendingHTLCStatus::Forward(ref forward_info) => { + 0u8.write(writer)?; + forward_info.write(writer)?; + }, + &PendingHTLCStatus::Fail(ref fail_msg) => { + 1u8.write(writer)?; + fail_msg.write(writer)?; + } + } + Ok(()) + } +} + +impl Readable for PendingHTLCStatus { + fn read(reader: &mut R) -> Result { + match >::read(reader)? { + 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)), + 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)), + _ => Err(DecodeError::InvalidValue), + } + } +} + +impl_writeable!(HTLCPreviousHopData, 0, { + short_channel_id, + htlc_id, + incoming_packet_shared_secret +}); + +impl Writeable for HTLCSource { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + match self { + &HTLCSource::PreviousHopData(ref hop_data) => { + 0u8.write(writer)?; + hop_data.write(writer)?; + }, + &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => { + 1u8.write(writer)?; + route.write(writer)?; + session_priv.write(writer)?; + first_hop_htlc_msat.write(writer)?; + } + } + Ok(()) + } +} + +impl Readable for HTLCSource { + fn read(reader: &mut R) -> Result { + match >::read(reader)? { + 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), + 1 => Ok(HTLCSource::OutboundRoute { + route: Readable::read(reader)?, + session_priv: Readable::read(reader)?, + first_hop_htlc_msat: Readable::read(reader)?, + }), + _ => Err(DecodeError::InvalidValue), + } + } +} + +impl Writeable for HTLCFailReason { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + match self { + &HTLCFailReason::ErrorPacket { ref err } => { + 0u8.write(writer)?; + err.write(writer)?; + }, + &HTLCFailReason::Reason { ref failure_code, ref data } => { + 1u8.write(writer)?; + failure_code.write(writer)?; + data.write(writer)?; + } + } + Ok(()) + } +} + +impl Readable for HTLCFailReason { + fn read(reader: &mut R) -> Result { + match >::read(reader)? { + 0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? 
}), + 1 => Ok(HTLCFailReason::Reason { + failure_code: Readable::read(reader)?, + data: Readable::read(reader)?, + }), + _ => Err(DecodeError::InvalidValue), + } + } +} + +impl_writeable!(HTLCForwardInfo, 0, { + prev_short_channel_id, + prev_htlc_id, + forward_info +}); + +impl Writeable for ChannelManager { + fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + let _ = self.total_consistency_lock.write().unwrap(); + + writer.write_all(&[SERIALIZATION_VERSION; 1])?; + writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?; + + self.genesis_hash.write(writer)?; + (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?; + self.last_block_hash.lock().unwrap().write(writer)?; + + let channel_state = self.channel_state.lock().unwrap(); + let mut unfunded_channels = 0; + for (_, channel) in channel_state.by_id.iter() { + if !channel.is_funding_initiated() { + unfunded_channels += 1; + } + } + ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?; + for (_, channel) in channel_state.by_id.iter() { + if channel.is_funding_initiated() { + channel.write(writer)?; + } + } + + (channel_state.forward_htlcs.len() as u64).write(writer)?; + for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() { + short_channel_id.write(writer)?; + (pending_forwards.len() as u64).write(writer)?; + for forward in pending_forwards { + forward.write(writer)?; + } + } + + (channel_state.claimable_htlcs.len() as u64).write(writer)?; + for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() { + payment_hash.write(writer)?; + (previous_hops.len() as u64).write(writer)?; + for previous_hop in previous_hops { + previous_hop.write(writer)?; + } + } + + Ok(()) + } +} + +/// Arguments for the creation of a ChannelManager that are not deserialized. +/// +/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation +/// is: +/// 1) Deserialize all stored ChannelMonitors. +/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash, +/// ChannelManager)>::read(reader, args). +/// This may result in closing some Channels if the ChannelMonitor is newer than the stored +/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted. +/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using +/// ChannelMonitor::get_monitored_outpoints and ChannelMonitor::get_funding_txo(). +/// 4) Reconnect blocks on your ChannelMonitors. +/// 5) Move the ChannelMonitors into your local ManyChannelMonitor. +/// 6) Disconnect/connect blocks on the ChannelManager. +/// 7) Register the new ChannelManager with your ChainWatchInterface (this does not happen +/// automatically as it does in ChannelManager::new()). +pub struct ChannelManagerReadArgs<'a> { + /// The keys provider which will give us relevant keys. Some keys will be loaded during + /// deserialization. + pub keys_manager: Arc, + + /// The fee_estimator for use in the ChannelManager in the future. + /// + /// No calls to the FeeEstimator will be made during deserialization. + pub fee_estimator: Arc, + /// The ManyChannelMonitor for use in the ChannelManager in the future. + /// + /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that + /// you have deserialized ChannelMonitors separately and will add them to your + /// ManyChannelMonitor after deserializing this ChannelManager. 
+ pub monitor: Arc, + /// The ChainWatchInterface for use in the ChannelManager in the future. + /// + /// No calls to the ChainWatchInterface will be made during deserialization. + pub chain_monitor: Arc, + /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be + /// used to broadcast the latest local commitment transactions of channels which must be + /// force-closed during deserialization. + pub tx_broadcaster: Arc, + /// The Logger for use in the ChannelManager and which may be used to log information during + /// deserialization. + pub logger: Arc, + /// Default settings used for new channels. Any existing channels will continue to use the + /// runtime settings which were stored when the ChannelManager was serialized. + pub default_config: UserConfig, + + /// A map from channel funding outpoints to ChannelMonitors for those channels (ie + /// value.get_funding_txo() should be the key). + /// + /// If a monitor is inconsistent with the channel state during deserialization the channel will + /// be force-closed using the data in the channelmonitor and the Channel will be dropped. This + /// is true for missing channels as well. If there is a monitor missing for which we find + /// channel data Err(DecodeError::InvalidValue) will be returned. + /// + /// In such cases the latest local transactions will be sent to the tx_broadcaster included in + /// this struct. + pub channel_monitors: &'a HashMap, +} + +impl<'a, R : ::std::io::Read> ReadableArgs> for (Sha256dHash, ChannelManager) { + fn read(reader: &mut R, args: ChannelManagerReadArgs<'a>) -> Result { + let _ver: u8 = Readable::read(reader)?; + let min_ver: u8 = Readable::read(reader)?; + if min_ver > SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let genesis_hash: Sha256dHash = Readable::read(reader)?; + let latest_block_height: u32 = Readable::read(reader)?; + let last_block_hash: Sha256dHash = Readable::read(reader)?; + + let mut closed_channels = Vec::new(); + + let channel_count: u64 = Readable::read(reader)?; + let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); + let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + for _ in 0..channel_count { + let mut channel: Channel = ReadableArgs::read(reader, args.logger.clone())?; + if channel.last_block_connected != last_block_hash { + return Err(DecodeError::InvalidValue); + } + + let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?; + funding_txo_set.insert(funding_txo.clone()); + if let Some(monitor) = args.channel_monitors.get(&funding_txo) { + if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() || + channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() || + channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() { + let mut force_close_res = channel.force_shutdown(); + force_close_res.0 = monitor.get_latest_local_commitment_txn(); + closed_channels.push(force_close_res); + } else { + if let Some(short_channel_id) = channel.get_short_channel_id() { + short_to_id.insert(short_channel_id, channel.channel_id()); + } + by_id.insert(channel.channel_id(), channel); + } + } else { + return Err(DecodeError::InvalidValue); + } + } + + for (ref funding_txo, ref monitor) in args.channel_monitors.iter() { 
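// Any ChannelMonitor we were handed for which no channel exists in the serialized state
// is assumed to belong to an already-closed channel: its latest local commitment
// transaction is queued below so finish_force_close_channel() can hand it to the
// tx_broadcaster.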
+ if !funding_txo_set.contains(funding_txo) { + closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new())); + } + } + + let forward_htlcs_count: u64 = Readable::read(reader)?; + let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, 128)); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); + } + forward_htlcs.insert(short_channel_id, pending_forwards); + } + + let claimable_htlcs_count: u64 = Readable::read(reader)?; + let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); + for _ in 0..claimable_htlcs_count { + let payment_hash = Readable::read(reader)?; + let previous_hops_len: u64 = Readable::read(reader)?; + let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2)); + for _ in 0..previous_hops_len { + previous_hops.push(Readable::read(reader)?); + } + claimable_htlcs.insert(payment_hash, previous_hops); + } + + let channel_manager = ChannelManager { + genesis_hash, + fee_estimator: args.fee_estimator, + monitor: args.monitor, + chain_monitor: args.chain_monitor, + tx_broadcaster: args.tx_broadcaster, + + latest_block_height: AtomicUsize::new(latest_block_height as usize), + last_block_hash: Mutex::new(last_block_hash), + secp_ctx: Secp256k1::new(), + + channel_state: Mutex::new(ChannelHolder { + by_id, + short_to_id, + next_forward: Instant::now(), + forward_htlcs, + claimable_htlcs, + pending_msg_events: Vec::new(), + }), + our_network_key: args.keys_manager.get_node_secret(), + + pending_events: Mutex::new(Vec::new()), + total_consistency_lock: RwLock::new(()), + keys_manager: args.keys_manager, + logger: args.logger, + default_configuration: args.default_config, + }; + + for close_res in closed_channels.drain(..) { + channel_manager.finish_force_close_channel(close_res); + //TODO: Broadcast channel update for closed channels, but only after we've made a + //connection or two. 
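Putting the ChannelManagerReadArgs documentation above into practice, a minimal sketch of steps 1 and 2 of the restart flow could look like the following. This is illustrative only and assumes it runs inside a function returning a Result over DecodeError; the Arc-wrapped components (keys_manager, fee_estimator, monitor, chain_monitor, tx_broadcaster, logger), the previously_deserialized_monitors() helper and manager_bytes are hypothetical stand-ins for state the embedding application already holds, not APIs defined in this file.

// Step 1 (assumed done elsewhere): a map from funding outpoint to the ChannelMonitors
// deserialized from disk, keyed as the channel_monitors field docs require.
let monitors = previously_deserialized_monitors();
// Step 2: fill in ChannelManagerReadArgs and deserialize the ChannelManager itself.
let read_args = ChannelManagerReadArgs {
	keys_manager, fee_estimator, monitor, chain_monitor, tx_broadcaster, logger,
	default_config: UserConfig::new(),
	channel_monitors: &monitors,
};
let mut reader = Cursor::new(&manager_bytes[..]);
let (last_block_hash, channel_manager) =
	<(Sha256dHash, ChannelManager)>::read(&mut reader, read_args)?;
// Steps 3 through 7 (registering monitor outpoints, reconnecting blocks, moving the
// monitors into the ManyChannelMonitor and registering with the ChainWatchInterface)
// then proceed as listed in the documentation above.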
+ } + + Ok((last_block_hash.clone(), channel_manager)) + } +} + +#[cfg(test)] +mod tests { + use chain::chaininterface; + use chain::transaction::OutPoint; + use chain::chaininterface::{ChainListener, ChainWatchInterface}; + use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor}; + use chain::keysinterface; + use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; + use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder, PaymentPreimage, PaymentHash}; + use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor}; + use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT}; + use ln::router::{Route, RouteHop, Router}; + use ln::msgs; + use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; + use util::test_utils; + use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; + use util::errors::APIError; + use util::logger::Logger; + use util::ser::{Writeable, Writer, ReadableArgs}; + use util::config::UserConfig; + + use bitcoin::util::hash::{BitcoinHash, Sha256dHash}; + use bitcoin::util::bip143; + use bitcoin::util::address::Address; + use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey}; + use bitcoin::blockdata::block::{Block, BlockHeader}; + use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType}; + use bitcoin::blockdata::script::{Builder, Script}; + use bitcoin::blockdata::opcodes; + use bitcoin::blockdata::constants::genesis_block; + use bitcoin::network::constants::Network; + + use hex; + + use secp256k1::{Secp256k1, Message}; + use secp256k1::key::{PublicKey,SecretKey}; use crypto::sha2::Sha256; use crypto::digest::Digest; @@ -2682,7 +3409,7 @@ mod tests { use rand::{thread_rng,Rng}; use std::cell::RefCell; - use std::collections::{BTreeSet, HashMap}; + use std::collections::{BTreeSet, HashMap, HashSet}; use std::default::Default; use std::rc::Rc; use std::sync::{Arc, Mutex}; @@ -2810,7 +3537,7 @@ mod tests { }, ); - let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]); + let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &PaymentHash([0x42; 32])); // Just check the final packet encoding, as it includes all the per-hop vectors in it // anyway... 
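Related to the change just above, where construct_onion_packet now takes the PaymentHash newtype rather than a bare [u8; 32]: the way the test helpers later in this module (the get_payment_preimage_hash! macro) derive a PaymentHash from a PaymentPreimage is essentially the sketch below, using the rust-crypto Sha256/Digest imports already in scope in this test module.

// Sketch only: mirrors the body of get_payment_preimage_hash! further down.
let payment_preimage = PaymentPreimage([0x42; 32]);
let mut payment_hash = PaymentHash([0; 32]);
let mut sha = Sha256::new();
sha.input(&payment_preimage.0[..]);
sha.result(&mut payment_hash.0[..]);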
assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap()); @@ -2821,22 +3548,22 @@ mod tests { // Returning Errors test vectors from BOLT 4 let onion_keys = build_test_onion_keys(); - let onion_error = ChannelManager::build_failure_packet(&onion_keys[4].shared_secret, 0x2002, &[0; 0]); + let onion_error = ChannelManager::build_failure_packet(&onion_keys[4].shared_secret[..], 0x2002, &[0; 0]); assert_eq!(onion_error.encode(), 
hex::decode("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap()); - let onion_packet_1 = ChannelManager::encrypt_failure_packet(&onion_keys[4].shared_secret, &onion_error.encode()[..]); + let onion_packet_1 = ChannelManager::encrypt_failure_packet(&onion_keys[4].shared_secret[..], &onion_error.encode()[..]); assert_eq!(onion_packet_1.data, hex::decode("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap()); - let onion_packet_2 = ChannelManager::encrypt_failure_packet(&onion_keys[3].shared_secret, &onion_packet_1.data[..]); + let onion_packet_2 = ChannelManager::encrypt_failure_packet(&onion_keys[3].shared_secret[..], &onion_packet_1.data[..]); assert_eq!(onion_packet_2.data, hex::decode("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap()); - let onion_packet_3 = ChannelManager::encrypt_failure_packet(&onion_keys[2].shared_secret, &onion_packet_2.data[..]); + let onion_packet_3 = ChannelManager::encrypt_failure_packet(&onion_keys[2].shared_secret[..], &onion_packet_2.data[..]); assert_eq!(onion_packet_3.data, hex::decode("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap()); - let onion_packet_4 = ChannelManager::encrypt_failure_packet(&onion_keys[1].shared_secret, &onion_packet_3.data[..]); + let onion_packet_4 = ChannelManager::encrypt_failure_packet(&onion_keys[1].shared_secret[..], &onion_packet_3.data[..]); assert_eq!(onion_packet_4.data, 
hex::decode("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap()); - let onion_packet_5 = ChannelManager::encrypt_failure_packet(&onion_keys[0].shared_secret, &onion_packet_4.data[..]); + let onion_packet_5 = ChannelManager::encrypt_failure_packet(&onion_keys[0].shared_secret[..], &onion_packet_4.data[..]); assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap()); } @@ -2856,6 +3583,7 @@ mod tests { chan_monitor: Arc, node: Arc, router: Router, + node_seed: [u8; 32], network_payment_count: Rc>, network_chan_count: Rc>, } @@ -2863,6 +3591,7 @@ mod tests { fn drop(&mut self) { if !::std::thread::panicking() { // Check that we processed all pending events + assert_eq!(self.node.get_and_clear_pending_msg_events().len(), 0); assert_eq!(self.node.get_and_clear_pending_events().len(), 0); assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0); } @@ -2879,20 +3608,80 @@ mod tests { (announcement, as_update, bs_update, channel_id, tx) } - fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction { - node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap(); + macro_rules! get_revoke_commit_msgs { + ($node: expr, $node_id: expr) => { + { + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + (match events[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, $node_id); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }, match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, $node_id); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + updates.commitment_signed.clone() + }, + _ => panic!("Unexpected event"), + }) + } + } + } - let events_1 = node_a.node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - let accept_chan = match events_1[0] { - Event::SendOpenChannel { ref node_id, ref msg } => { - assert_eq!(*node_id, node_b.node.get_our_node_id()); - node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), msg).unwrap() - }, - _ => panic!("Unexpected event"), - }; + macro_rules! 
get_event_msg { + ($node: expr, $event_type: path, $node_id: expr) => { + { + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + $event_type { ref node_id, ref msg } => { + assert_eq!(*node_id, $node_id); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + } + } + } + } + + macro_rules! get_htlc_update_msgs { + ($node: expr, $node_id: expr) => { + { + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, $node_id); + (*updates).clone() + }, + _ => panic!("Unexpected event"), + } + } + } + } + + macro_rules! get_feerate { + ($node: expr, $channel_id: expr) => { + { + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&$channel_id).unwrap(); + chan.get_feerate() + } + } + } - node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_chan).unwrap(); + + fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction { + node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap(); + node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap(); + node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap(); let chan_id = *node_a.network_chan_count.borrow(); let tx; @@ -2908,7 +3697,7 @@ mod tests { tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut { value: *channel_value_satoshis, script_pubkey: output_script.clone(), }]}; - funding_output = OutPoint::new(Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0); + funding_output = OutPoint::new(tx.txid(), 0); node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output); let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); @@ -2919,22 +3708,15 @@ mod tests { _ => panic!("Unexpected event"), } - let events_3 = node_a.node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - let funding_signed = match events_3[0] { - Event::SendFundingCreated { ref node_id, ref msg } => { - assert_eq!(*node_id, node_b.node.get_our_node_id()); - let res = node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), msg).unwrap(); - let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - assert_eq!(added_monitors[0].0, funding_output); - added_monitors.clear(); - res - }, - _ => panic!("Unexpected event"), - }; + node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap(); + { + let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 1); + assert_eq!(added_monitors[0].0, funding_output); + added_monitors.clear(); + } - node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &funding_signed).unwrap(); + node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())).unwrap(); { let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); 
assert_eq!(added_monitors.len(), 1); @@ -2957,30 +3739,27 @@ mod tests { fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) { confirm_transaction(&node_b.chain_monitor, &tx, tx.version); - let events_5 = node_b.node.get_and_clear_pending_events(); - assert_eq!(events_5.len(), 1); - match events_5[0] { - Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_a.node.get_our_node_id()); - assert!(announcement_sigs.is_none()); - node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap() - }, - _ => panic!("Unexpected event"), - }; + node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingLocked, node_a.node.get_our_node_id())).unwrap(); let channel_id; confirm_transaction(&node_a.chain_monitor, &tx, tx.version); - let events_6 = node_a.node.get_and_clear_pending_events(); - assert_eq!(events_6.len(), 1); - (match events_6[0] { - Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { + let events_6 = node_a.node.get_and_clear_pending_msg_events(); + assert_eq!(events_6.len(), 2); + ((match events_6[0] { + MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { channel_id = msg.channel_id.clone(); assert_eq!(*node_id, node_b.node.get_our_node_id()); - (msg.clone(), announcement_sigs.clone().unwrap()) + msg.clone() + }, + _ => panic!("Unexpected event"), + }, match events_6[1] { + MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { + assert_eq!(*node_id, node_b.node.get_our_node_id()); + msg.clone() }, _ => panic!("Unexpected event"), - }, channel_id) + }), channel_id) } fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) { @@ -2990,26 +3769,24 @@ mod tests { } fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) { - let bs_announcement_sigs = { - let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap(); - node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap(); - bs_announcement_sigs - }; + node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap(); + let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id()); + node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap(); - let events_7 = node_b.node.get_and_clear_pending_events(); + let events_7 = node_b.node.get_and_clear_pending_msg_events(); assert_eq!(events_7.len(), 1); let (announcement, bs_update) = match events_7[0] { - Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { + MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { (msg, update_msg) }, _ => panic!("Unexpected event"), }; node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap(); - let events_8 = node_a.node.get_and_clear_pending_events(); + let events_8 = node_a.node.get_and_clear_pending_msg_events(); assert_eq!(events_8.len(), 
1); let as_update = match events_8[0] { - Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { + MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { assert!(*announcement == *msg); update_msg }, @@ -3046,70 +3823,94 @@ mod tests { } } - fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) { - let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) }; + macro_rules! get_closing_signed_broadcast { + ($node: expr, $dest_pubkey: expr) => { + { + let events = $node.get_and_clear_pending_msg_events(); + assert!(events.len() == 1 || events.len() == 2); + (match events[events.len() - 1] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + msg.clone() + }, + _ => panic!("Unexpected event"), + }, if events.len() == 2 { + match events[0] { + MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + assert_eq!(*node_id, $dest_pubkey); + Some(msg.clone()) + }, + _ => panic!("Unexpected event"), + } + } else { None }) + } + } + } + + fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) { + let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) }; let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; let (tx_a, tx_b); node_a.close_channel(channel_id).unwrap(); - let events_1 = node_a.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - let shutdown_a = match events_1[0] { - Event::SendShutdown { ref node_id, ref msg } => { - assert_eq!(node_id, &node_b.get_our_node_id()); + node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id())).unwrap(); + + let events_1 = node_b.get_and_clear_pending_msg_events(); + assert!(events_1.len() >= 1); + let shutdown_b = match events_1[0] { + MessageSendEvent::SendShutdown { ref node_id, ref msg } => { + assert_eq!(node_id, &node_a.get_our_node_id()); msg.clone() }, _ => panic!("Unexpected event"), }; - let (shutdown_b, mut closing_signed_b) = node_b.handle_shutdown(&node_a.get_our_node_id(), &shutdown_a).unwrap(); - if !close_inbound_first { - assert!(closing_signed_b.is_none()); - } - let (empty_a, mut closing_signed_a) = node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b.unwrap()).unwrap(); - assert!(empty_a.is_none()); - if close_inbound_first { - assert!(closing_signed_a.is_none()); - closing_signed_a = node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap(); + let closing_signed_b = if !close_inbound_first { + assert_eq!(events_1.len(), 1); + None + } else { + Some(match events_1[1] { + MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + assert_eq!(node_id, &node_a.get_our_node_id()); + msg.clone() + }, + _ => panic!("Unexpected event"), + }) + }; + + node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap(); + let (as_update, bs_update) = if 
close_inbound_first { + assert!(node_a.get_and_clear_pending_msg_events().is_empty()); + node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap(); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); + let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); - let empty_b = node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap(); - assert!(empty_b.is_none()); + node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap(); + let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + assert!(none_b.is_none()); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); + (as_update, bs_update) } else { - closing_signed_b = node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap(); + let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); + + node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a).unwrap(); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); + let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); - let empty_a2 = node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap(); - assert!(empty_a2.is_none()); + node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap(); + let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); + assert!(none_a.is_none()); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); - } + (as_update, bs_update) + }; assert_eq!(tx_a, tx_b); check_spends!(tx_a, funding_tx); - let events_2 = node_a.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - let as_update = match events_2[0] { - Event::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }; - - let events_3 = node_b.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - let bs_update = match events_3[0] { - Event::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }; - - (as_update, bs_update) + (as_update, bs_update, tx_a) } struct SendEvent { @@ -3126,12 +3927,18 @@ mod tests { SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed } } - fn from_event(event: Event) -> SendEvent { + fn from_event(event: MessageSendEvent) -> SendEvent { match event { - Event::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates), + MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates), _ => panic!("Unexpected event type!"), } } + + fn from_node(node: &Node) -> SendEvent { + let mut events = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.pop().unwrap()) + } } macro_rules! check_added_monitors { @@ -3145,21 +3952,58 @@ mod tests { } macro_rules! 
commitment_signed_dance { - ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => { { check_added_monitors!($node_a, 0); - let (as_revoke_and_ack, as_commitment_signed) = $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); check_added_monitors!($node_a, 1); + commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false); + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => { + { + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_b, 0); - assert!($node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + assert!($node_b.node.get_and_clear_pending_msg_events().is_empty()); + $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!($node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!($node_b, 1); - let (bs_revoke_and_ack, bs_none) = $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); - assert!(bs_none.is_none()); + $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap(); + let (bs_revoke_and_ack, extra_msg_option) = { + let events = $node_b.node.get_and_clear_pending_msg_events(); + assert!(events.len() <= 2); + (match events[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, $node_a.node.get_our_node_id()); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }, events.get(1).map(|e| e.clone())) + }; check_added_monitors!($node_b, 1); if $fail_backwards { assert!($node_a.node.get_and_clear_pending_events().is_empty()); + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); } - assert!($node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + (extra_msg_option, bs_revoke_and_ack) + } + }; + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => { + { + check_added_monitors!($node_a, 0); + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + check_added_monitors!($node_a, 1); + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); + assert!(extra_msg_option.is_none()); + bs_revoke_and_ack + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + { + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); + $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); { let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); if $fail_backwards { @@ -3170,6 
+4014,26 @@ mod tests { } added_monitors.clear(); } + extra_msg_option + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => { + { + assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none()); + } + }; + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { + { + commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true); + if $fail_backwards { + let channel_state = $node_a.node.channel_state.lock().unwrap(); + assert_eq!(channel_state.pending_msg_events.len(), 1); + if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] { + assert_ne!(*node_id, $node_b.node.get_our_node_id()); + } else { panic!("Unexpected event"); } + } else { + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + } } } } @@ -3177,25 +4041,25 @@ mod tests { macro_rules! get_payment_preimage_hash { ($node: expr) => { { - let payment_preimage = [*$node.network_payment_count.borrow(); 32]; + let payment_preimage = PaymentPreimage([*$node.network_payment_count.borrow(); 32]); *$node.network_payment_count.borrow_mut() += 1; - let mut payment_hash = [0; 32]; + let mut payment_hash = PaymentHash([0; 32]); let mut sha = Sha256::new(); - sha.input(&payment_preimage[..]); - sha.result(&mut payment_hash); + sha.input(&payment_preimage.0[..]); + sha.result(&mut payment_hash.0[..]); (payment_preimage, payment_hash) } } } - fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { + fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) { let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node); let mut payment_event = { origin_node.node.send_payment(route, our_payment_hash).unwrap(); check_added_monitors!(origin_node, 1); - let mut events = origin_node.node.get_and_clear_pending_events(); + let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; @@ -3218,9 +4082,9 @@ mod tests { node.node.channel_state.lock().unwrap().next_forward = Instant::now(); node.node.process_pending_htlc_forwards(); - let mut events_2 = node.node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); if idx == expected_route.len() - 1 { + let events_2 = node.node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); match events_2[0] { Event::PaymentReceived { ref payment_hash, amt } => { assert_eq!(our_payment_hash, *payment_hash); @@ -3229,6 +4093,8 @@ mod tests { _ => panic!("Unexpected event"), } } else { + let mut events_2 = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); check_added_monitors!(node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); @@ -3240,50 +4106,70 @@ mod tests { (our_payment_preimage, our_payment_hash) } - fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) { + fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: PaymentPreimage) { assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage)); check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, 
msgs::CommitmentSigned)> = None; - macro_rules! update_fulfill_dance { - ($node: expr, $prev_node: expr, $last_node: expr) => { + let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id(); + macro_rules! get_next_msgs { + ($node: expr) => { + { + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + expected_next_node = node_id.clone(); + Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone())) + }, + _ => panic!("Unexpected event"), + } + } + } + } + + macro_rules! last_update_fulfill_dance { + ($node: expr, $prev_node: expr) => { { $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); - if $last_node { - check_added_monitors!($node, 0); + check_added_monitors!($node, 0); + assert!($node.node.get_and_clear_pending_msg_events().is_empty()); + commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false); + } + } + } + macro_rules! mid_update_fulfill_dance { + ($node: expr, $prev_node: expr, $new_msgs: expr) => { + { + $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); + check_added_monitors!($node, 1); + let new_next_msgs = if $new_msgs { + get_next_msgs!($node) } else { - check_added_monitors!($node, 1); - } + assert!($node.node.get_and_clear_pending_msg_events().is_empty()); + None + }; commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false); + next_msgs = new_next_msgs; } } } - let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id(); let mut prev_node = expected_route.last().unwrap(); for (idx, node) in expected_route.iter().rev().enumerate() { assert_eq!(expected_next_node, node.node.get_our_node_id()); + let update_next_msgs = !skip_last || idx != expected_route.len() - 1; if next_msgs.is_some() { - update_fulfill_dance!(node, prev_node, false); - } - - let events = node.node.get_and_clear_pending_events(); - if !skip_last || idx != expected_route.len() - 1 { - assert_eq!(events.len(), 1); - match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - expected_next_node = node_id.clone(); - next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone())); - }, - _ => panic!("Unexpected event"), - } + mid_update_fulfill_dance!(node, prev_node, update_next_msgs); + } else if update_next_msgs { + next_msgs = get_next_msgs!(node); } else { - assert!(events.is_empty()); + assert!(node.node.get_and_clear_pending_msg_events().is_empty()); } if !skip_last && idx == expected_route.len() - 1 { assert_eq!(expected_next_node, origin_node.node.get_our_node_id()); @@ -3293,7 
+4179,7 @@ mod tests { } if !skip_last { - update_fulfill_dance!(origin_node, expected_route.first().unwrap(), true); + last_update_fulfill_dance!(origin_node, expected_route.first().unwrap()); let events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -3305,13 +4191,13 @@ mod tests { } } - fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) { + fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: PaymentPreimage) { claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage); } const TEST_FINAL_CLTV: u32 = 32; - fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { + fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) { let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap(); assert_eq!(route.hops.len(), expected_route.len()); for (node, hop) in expected_route.iter().zip(route.hops.iter()) { @@ -3342,7 +4228,7 @@ mod tests { claim_payment(&origin, expected_route, our_payment_preimage); } - fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) { + fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: PaymentHash) { assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, PaymentFailReason::PreimageUnknown)); check_added_monitors!(expected_route.last().unwrap(), 1); @@ -3367,11 +4253,11 @@ mod tests { update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1); } - let events = node.node.get_and_clear_pending_events(); + let events = node.node.get_and_clear_pending_msg_events(); if !skip_last || idx != expected_route.len() - 1 { assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -3407,7 +4293,7 @@ mod tests { } } - fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) { + fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: PaymentHash) { fail_payment_along_route(origin_node, expected_route, false, our_payment_hash); } @@ -3415,22 +4301,25 @@ mod tests { let mut nodes = Vec::new(); let mut rng = thread_rng(); let secp_ctx = Secp256k1::new(); - let logger: Arc = Arc::new(test_utils::TestLogger::new()); let chan_count = Rc::new(RefCell::new(0)); let payment_count = Rc::new(RefCell::new(0)); - for _ in 0..node_count { + for i in 0..node_count { + let logger: Arc = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i))); let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }); let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger))); let tx_broadcaster = 
Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())}); let mut seed = [0; 32]; rng.fill_bytes(&mut seed); let keys_manager = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger))); - let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone())); - let node = ChannelManager::new(0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone()).unwrap(); + let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone())); + let mut config = UserConfig::new(); + config.channel_options.announced_channel = true; + config.channel_limits.force_announced_channel_preference = false; + let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap(); let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger)); - nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, + nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, node_seed: seed, network_payment_count: payment_count.clone(), network_chan_count: chan_count.clone(), }); @@ -3445,14 +4334,6 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let channel_id = chan.2; - macro_rules! get_feerate { - ($node: expr) => {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); - chan.get_feerate() - }} - } - // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); @@ -3474,13 +4355,13 @@ mod tests { // (6) RAA is delivered -> // First nodes[0] generates an update_fee - nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap(); check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { // (1) - Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -3494,7 +4375,7 @@ mod tests { check_added_monitors!(nodes[1], 1); let payment_event = { - let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_1.len(), 1); SendEvent::from_event(events_1.remove(0)) }; @@ -3503,40 +4384,45 @@ mod tests { // ...now when the messages get delivered everyone should be happy nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) - assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); // deliver(1), generate (3): - let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); - assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting nodes[0] revoke_and_ack + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - let bs_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2) - assert!(bs_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (4) - assert!(bs_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (4) - assert!(bs_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (4) - assert!(bs_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (4) - assert!(bs_update.as_ref().unwrap().update_fee.is_none()); // (4) + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); // deliver (2) + let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(bs_update.update_add_htlcs.is_empty()); // (4) + assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) + assert!(bs_update.update_fail_htlcs.is_empty()); // (4) + assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4) + assert!(bs_update.update_fee.is_none()); // (4) check_added_monitors!(nodes[1], 1); - let as_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); // deliver (3) - assert!(as_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (5) - assert!(as_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (5) - assert!(as_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (5) - assert!(as_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (5) - assert!(as_update.as_ref().unwrap().update_fee.is_none()); // (5) + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); // deliver (3) + let as_update = 
get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(as_update.update_add_htlcs.is_empty()); // (5) + assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) + assert!(as_update.update_fail_htlcs.is_empty()); // (5) + assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5) + assert!(as_update.update_fee.is_none()); // (5) check_added_monitors!(nodes[0], 1); - let (as_second_revoke, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.unwrap().commitment_signed).unwrap(); // deliver (4) - assert!(as_second_commitment_signed.is_none()); // only (6) + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap(); // deliver (4) + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // only (6) so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - let (bs_second_revoke, bs_second_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.unwrap().commitment_signed).unwrap(); // deliver (5) - assert!(bs_second_commitment_signed.is_none()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed).unwrap(); // deliver (5) + let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap(); check_added_monitors!(nodes[0], 1); let events_2 = nodes[0].node.get_and_clear_pending_events(); @@ -3546,7 +4432,7 @@ mod tests { _ => panic!("Unexpected event"), } - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); // deliver (6) + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); // deliver (6) check_added_monitors!(nodes[1], 1); } @@ -3558,25 +4444,17 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let channel_id = chan.2; - macro_rules! get_feerate { - ($node: expr) => {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); - chan.get_feerate() - }} - } - // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); // First nodes[0] generates an update_fee - nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap(); check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let update_msg = match events_0[0] { // (1) - Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. 
} => { update_fee.as_ref() }, _ => panic!("Unexpected event"), @@ -3590,7 +4468,7 @@ mod tests { check_added_monitors!(nodes[1], 1); let payment_event = { - let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_1.len(), 1); SendEvent::from_event(events_1.remove(0)) }; @@ -3599,11 +4477,12 @@ mod tests { // ...now when the messages get delivered everyone should be happy nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) - assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); // deliver (2) + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2) check_added_monitors!(nodes[1], 1); // We can't continue, sadly, because our (1) now has a bogus signature @@ -3615,14 +4494,6 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let channel_id = chan.2; - macro_rules! get_feerate { - ($node: expr) => {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); - chan.get_feerate() - }} - } - // A B // update_fee/commitment_signed -> // .- send (1) RAA and (2) commitment_signed @@ -3643,14 +4514,14 @@ mod tests { // revoke_and_ack -> // First nodes[0] generates an update_fee - let initial_feerate = get_feerate!(nodes[0]); + let initial_feerate = get_feerate!(nodes[0], channel_id); nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap(); check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) - Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { (update_fee.as_ref().unwrap(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -3658,13 +4529,15 @@ mod tests { // Deliver first update_fee/commitment_signed pair, generating (1) and (2): nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap(); - let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap(); + let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment // transaction: nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap(); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Create the (3) update_fee message that nodes[0] will generate before it does... let mut update_msg_2 = msgs::UpdateFee { @@ -3679,36 +4552,43 @@ mod tests { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap(); // Deliver (1), generating (3) and (4) - let as_second_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); + let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); - assert!(as_second_update.as_ref().unwrap().update_add_htlcs.is_empty()); - assert!(as_second_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); - assert!(as_second_update.as_ref().unwrap().update_fail_htlcs.is_empty()); - assert!(as_second_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(as_second_update.update_add_htlcs.is_empty()); + assert!(as_second_update.update_fulfill_htlcs.is_empty()); + assert!(as_second_update.update_fail_htlcs.is_empty()); + assert!(as_second_update.update_fail_malformed_htlcs.is_empty()); // Check that the update_fee newly generated matches what we delivered: - assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); - assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); + assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); + assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); // Deliver (2) commitment_signed - let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), bs_commitment_signed.as_ref().unwrap()).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap(); + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); - assert!(as_commitment_signed.is_none()); + // No commitment_signed so get_event_msg's assert(len == 1) passes - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); + 
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); // Delever (4) - let (bs_second_revoke, bs_second_commitment) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.unwrap().commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed).unwrap(); + let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (as_second_revoke, as_second_commitment) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment.unwrap()).unwrap(); - assert!(as_second_commitment.is_none()); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment).unwrap(); + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); } @@ -3718,77 +4598,115 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let channel_id = chan.2; - macro_rules! 
get_feerate { - ($node: expr) => {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); - chan.get_feerate() - }} - } - - let feerate = get_feerate!(nodes[0]); - nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + let feerate = get_feerate!(nodes[0], channel_id); + nodes[0].node.update_fee(channel_id, feerate+25).unwrap(); + check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); - let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); - let commitment_signed = commitment_signed.unwrap(); - check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); - assert!(resp_option.is_none()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); - assert!(commitment_signed.is_none()); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); - assert!(resp_option.is_none()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); } + #[test] + fn test_update_fee_that_funder_cannot_afford() { + let nodes = create_network(2); + let channel_value = 1888; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000); + let channel_id = chan.2; + + let feerate = 260; + nodes[0].node.update_fee(channel_id, feerate).unwrap(); + check_added_monitors!(nodes[0], 1); + let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()).unwrap(); + + 
commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
+
+ //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above.
+ //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve)
+ {
+ let chan_lock = nodes[1].node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&channel_id).unwrap();
+
+ //We made sure neither party's funds are below the dust limit, so subtract the 2 non-HTLC outputs from the output count
+ let num_htlcs = chan.last_local_commitment_txn[0].output.len() - 2;
+ let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
+ let mut actual_fee = chan.last_local_commitment_txn[0].output.iter().fold(0, |acc, output| acc + output.value);
+ actual_fee = channel_value - actual_fee;
+ assert_eq!(total_fee, actual_fee);
+ } //drop the mutex
+
+ //Add 2 to the previous fee rate so that the final fee increases by 1 (with no HTLCs the fee is essentially
+ //fee_rate*(724/1000), so the increment of 1*0.724 is rounded back down)
+ nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()).unwrap();
+
+ //While producing the commitment_signed response after handling a received update_fee request, the
+ //check that the funder (who sent the update_fee request) can afford the new fee (funder_balance >= fee+channel_reserve)
+ //should produce an error.
+ let err = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed).unwrap_err();
+
+ assert!(match err.err {
+ "Funding remote cannot afford proposed new fee" => true,
+ _ => false,
+ });
+
+ //clear the message we could not handle
+ nodes[1].node.get_and_clear_pending_msg_events();
+ }
+
#[test]
fn test_update_fee_with_fundee_update_add_htlc() {
let mut nodes = create_network(2);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let channel_id = chan.2;
- macro_rules! 
get_feerate { - ($node: expr) => {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); - chan.get_feerate() - }} - } - // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - let feerate = get_feerate!(nodes[0]); + let feerate = get_feerate!(nodes[0], channel_id); nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); - check_added_monitors!(nodes[0], 1); - let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); - let commitment_signed = commitment_signed.unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap(); @@ -3802,21 +4720,23 @@ mod tests { assert_eq!(added_monitors.len(), 0); added_monitors.clear(); } - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // node[1] has nothing to do - let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); - assert!(resp_option.is_none()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); - assert!(commitment_signed.is_none()); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + check_added_monitors!(nodes[1], 1); // AwaitingRemoteRevoke ends here - let commitment_update = resp_option.unwrap(); + let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); 
assert_eq!(commitment_update.update_add_htlcs.len(), 1); assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); assert_eq!(commitment_update.update_fail_htlcs.len(), 0); @@ -3824,20 +4744,22 @@ mod tests { assert_eq!(commitment_update.update_fee.is_none(), true); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap(); - let (revoke, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); check_added_monitors!(nodes[0], 1); + let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap(); check_added_monitors!(nodes[1], 1); - let commitment_signed = commitment_signed.unwrap(); - let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap(); - check_added_monitors!(nodes[1], 1); - assert!(resp_option.is_none()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let (revoke, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap(); check_added_monitors!(nodes[1], 1); - assert!(commitment_signed.is_none()); - let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap(); + let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap(); check_added_monitors!(nodes[0], 1); - assert!(resp_option.is_none()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -3868,14 +4790,6 @@ mod tests { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let channel_id = chan.2; - macro_rules! get_feerate { - ($node: expr) => {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&channel_id).unwrap(); - chan.get_feerate() - }} - } - // A B // (1) update_fee/commitment_signed -> // <- (2) revoke_and_ack @@ -3891,13 +4805,14 @@ mod tests { // revoke_and_ack -> // Create and deliver (1)... 
- let feerate = get_feerate!(nodes[0]); + let feerate = get_feerate!(nodes[0], channel_id); nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -3905,62 +4820,393 @@ mod tests { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); // Generate (2) and (3): - let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); - let commitment_signed_0 = commitment_signed.unwrap(); - check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); // Deliver (2): - let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); - assert!(resp_option.is_none()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); // Create and deliver (4)... nodes[0].node.update_fee(channel_id, feerate+30).unwrap(); - let events_0 = nodes[0].node.get_and_clear_pending_events(); + check_added_monitors!(nodes[0], 1); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); - let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); - // ... creating (5) - assert!(commitment_signed.is_none()); - check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); check_added_monitors!(nodes[1], 1); + // ... 
creating (5) + let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes // Handle (3), creating (6): - let (revoke_msg_0, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap(); - assert!(commitment_signed.is_none()); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap(); check_added_monitors!(nodes[0], 1); + let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes // Deliver (5): - let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); - assert!(resp_option.is_none()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); // Deliver (6), creating (7): - let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap(); - let commitment_signed = resp_option.unwrap().commitment_signed; + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap(); + let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(commitment_update.update_add_htlcs.is_empty()); + assert!(commitment_update.update_fulfill_htlcs.is_empty()); + assert!(commitment_update.update_fail_htlcs.is_empty()); + assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(commitment_update.update_fee.is_none()); check_added_monitors!(nodes[1], 1); // Deliver (7) - let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); - assert!(commitment_signed.is_none()); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); check_added_monitors!(nodes[0], 1); - let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); - assert!(resp_option.is_none()); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - assert_eq!(get_feerate!(nodes[0]), feerate + 30); - assert_eq!(get_feerate!(nodes[1]), feerate + 30); + assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); } + #[test] + fn pre_funding_lock_shutdown_test() { + // Test sending a shutdown prior to funding_locked after funding generation + let nodes = create_network(2); + let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]); + 
nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]); + + nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + assert!(nodes[1].node.list_channels().is_empty()); + } + + #[test] + fn updates_shutdown_wait() { + // Test sending a shutdown with outstanding updates pending + let mut nodes = create_network(3); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[0].node.close_channel(&chan_1.2).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]); + if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {} + else { panic!("New sends should fail!") }; + if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {} + else { panic!("New sends should fail!") }; + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], 
nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); + } + + #[test] + fn htlc_fail_async_shutdown() { + // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order + let mut nodes = create_network(3); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap(); + let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], (), false, 
true, false); + + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fulfill_htlcs.is_empty()); + assert_eq!(updates_2.update_fail_htlcs.len(), 1); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, ref rejected_by_dest } => { + assert_eq!(our_payment_hash, *payment_hash); + assert!(!rejected_by_dest); + }, + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); + } + + fn do_test_shutdown_rebroadcast(recv_count: u8) { + // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of + // messages delivered prior to disconnect + let nodes = create_network(3); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000); + + nodes[1].node.close_channel(&chan_1.2).unwrap(); + let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + if recv_count > 0 { + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); + let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + if recv_count > 1 { + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap(); + 
let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_shutdown == node_1_2nd_shutdown); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap(); + let node_0_2nd_shutdown = if recv_count > 0 { + let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap(); + node_0_2nd_shutdown + } else { + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap(); + get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) + }; + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap(); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + assert!(updates_2.update_add_htlcs.is_empty()); + assert!(updates_2.update_fail_htlcs.is_empty()); + assert!(updates_2.update_fail_malformed_htlcs.is_empty()); + assert!(updates_2.update_fee.is_none()); + assert_eq!(updates_2.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(our_payment_preimage, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + if recv_count > 0 { + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + assert!(node_1_closing_signed.is_some()); + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + if recv_count == 0 { + // If all closing_signeds weren't delivered we can just resume where we left off... 
+ let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap(); + let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); + assert!(node_0_2nd_shutdown == node_0_3rd_shutdown); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap(); + let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); + + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap(); + let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); + assert!(node_0_closing_signed == node_0_2nd_closing_signed); + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap(); + let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); + let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + assert!(node_0_none.is_none()); + } else { + // If one node, however, received + responded with an identical closing_signed we end + // up erroring and node[0] will try to broadcast its own latest commitment transaction. + // There isn't really anything better we can do simply, but in the future we might + // explore storing a set of recently-closed channels that got disconnected during + // closing_signed and avoiding broadcasting local commitment txn for some timeout to + // give our counterparty enough time to (potentially) broadcast a cooperative closing + // transaction. 
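// (Illustrative sketch of that idea, not something this patch implements: keep, say, a
// `recently_closed: Mutex<HashMap<[u8; 32], Instant>>` of channels whose closing_signed
// completed right before a disconnect, and have channel_reestablish skip broadcasting the
// local commitment transaction for entries younger than some grace period.)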
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) = + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) { + nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg); + let msgs::ErrorMessage {ref channel_id, ..} = msg; + assert_eq!(*channel_id, chan_1.2); + } else { panic!("Needed SendErrorMessage close"); } + + // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and + // checks it, but in this case nodes[0] didn't ever get a chance to receive a + // closing_signed so we do it ourselves + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + }, + _ => panic!("Unexpected event"), + } + } + + assert!(nodes[0].node.list_channels().is_empty()); + + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[2].node.list_channels().is_empty()); + } + + #[test] + fn test_shutdown_rebroadcast() { + do_test_shutdown_rebroadcast(0); + do_test_shutdown_rebroadcast(1); + do_test_shutdown_rebroadcast(2); + } + #[test] fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -4145,7 +5391,10 @@ mod tests { false } else { true } }); - assert_eq!(res.len(), 2); + assert!(res.len() == 2 || res.len() == 3); + if res.len() == 3 { + assert_eq!(res[1], res[2]); + } } assert!(node_txn.is_empty()); @@ -4191,19 +5440,19 @@ mod tests { } fn get_announce_close_broadcast_events(nodes: &Vec, a: usize, b: usize) { - let events_1 = nodes[a].node.get_and_clear_pending_events(); + let events_1 = nodes[a].node.get_and_clear_pending_msg_events(); assert_eq!(events_1.len(), 1); let as_update = match events_1[0] { - Event::BroadcastChannelUpdate { ref msg } => { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { msg.clone() }, _ => panic!("Unexpected event"), }; - let events_2 = nodes[b].node.get_and_clear_pending_events(); + let events_2 = nodes[b].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let bs_update = match events_2[0] { - Event::BroadcastChannelUpdate { ref msg } => { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { msg.clone() }, _ => panic!("Unexpected event"), @@ -4228,8 +5477,7 @@ mod tests { }} } - #[test] - fn channel_reserve_test() { + fn do_channel_reserve_test(test_recv: bool) { use util::rng; use std::sync::atomic::Ordering; use ln::msgs::HandleError; @@ -4262,7 +5510,7 @@ mod tests { macro_rules! 
expect_forward { ($node: expr) => {{ - let mut events = $node.node.get_and_clear_pending_events(); + let mut events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); check_added_monitors!($node, 1); let payment_event = SendEvent::from_event(events.remove(0)); @@ -4345,7 +5593,7 @@ mod tests { nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap(); check_added_monitors!(nodes[0], 1); - let mut events = nodes[0].node.get_and_clear_pending_events(); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; @@ -4386,9 +5634,23 @@ mod tests { onion_routing_packet: onion_packet, }; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); - match err { - HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + if test_recv { + let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); + match err { + HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + } + // If we send a garbage message, the channel should get closed, making the rest of this test case fail. + assert_eq!(nodes[1].node.list_channels().len(), 1); + assert_eq!(nodes[1].node.list_channels().len(), 1); + let channel_close_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(channel_close_broadcast.len(), 1); + match channel_close_broadcast[0] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + }, + _ => panic!("Unexpected event"), + } + return; } } @@ -4421,19 +5683,25 @@ mod tests { // this will also stuck in the holding cell nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap(); check_added_monitors!(nodes[0], 0); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // flush the pending htlc - let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap(); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); check_added_monitors!(nodes[0], 1); - let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); - assert!(bs_none.is_none()); + let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed).unwrap(); + let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - 
assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4490,6 +5758,12 @@ mod tests { assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22); } + #[test] + fn channel_reserve_test() { + do_channel_reserve_test(false); + do_channel_reserve_test(true); + } + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -4541,10 +5815,10 @@ mod tests { assert!($node.node.claim_funds($preimage)); check_added_monitors!($node, 1); - let events = $node.node.get_and_clear_pending_events(); + let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(*node_id, $prev_node.node.get_our_node_id()); @@ -4615,7 +5889,13 @@ mod tests { get_announce_close_broadcast_events(&nodes, 3, 4); assert_eq!(nodes[3].node.list_channels().len(), 0); assert_eq!(nodes[4].node.list_channels().len(), 0); + } + + #[test] + fn test_justice_tx() { + // Test justice txn built on revoked HTLC-Success tx, against both sides + let nodes = create_network(2); // Create some new channels: let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -4629,7 +5909,7 @@ mod tests { assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present assert_eq!(revoked_local_txn[1].input.len(), 1); assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout + assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout // Revoke the old state claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); @@ -4654,19 +5934,58 @@ mod tests { test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone()); } get_announce_close_broadcast_events(&nodes, 0, 1); + assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 0); - } - #[test] - fn revoked_output_claim() { - // Simple test to ensure a node will claim a revoked output when a stale remote commitment - // transaction is broadcast by its counterparty - let nodes = create_network(2); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output - let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); - assert_eq!(revoked_local_txn.len(), 1); + // We test justice_tx build by A on B's revoked HTLC-Success tx + // Create some new channels: + let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1); + + // A pending HTLC which will be revoked: + let payment_preimage_4 = route_payment(&nodes[0], 
&vec!(&nodes[1])[..], 3000000).0; + // Get the will-be-revoked local txn from B + let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid()); + assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present + // Revoke the old state + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4); + { + let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + { + let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected + assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output + + check_spends!(node_txn[0], revoked_local_txn[0].clone()); + node_txn.swap_remove(0); + } + test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE); + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); + header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1); + test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone()); + } + get_announce_close_broadcast_events(&nodes, 0, 1); + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 0); + } + + #[test] + fn revoked_output_claim() { + // Simple test to ensure a node will claim a revoked output when a stale remote commitment + // transaction is broadcast by its counterparty + let nodes = create_network(2); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output + let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn.len(), 1); // Only output is the full channel value back to nodes[0]: assert_eq!(revoked_local_txn[0].output.len(), 1); // Send a payment through, updating everyone's latest commitment txn @@ -4700,7 +6019,7 @@ mod tests { send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); // node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; + let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000); // Get the will-be-revoked local txn from node[0] let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); @@ -4709,7 +6028,7 @@ mod 
tests { assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid()); assert_eq!(revoked_local_txn[1].input.len(), 1); assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout + assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone()); //Revoke the old state @@ -4717,10 +6036,18 @@ mod tests { { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_2); + }, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 4); @@ -4735,8 +6062,8 @@ mod tests { witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len()); assert_eq!(witness_lens.len(), 3); assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC - assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC + assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC // Next nodes[1] broadcasts its current local tx state: assert_eq!(node_txn[1].input.len(), 1); @@ -4744,7 +6071,7 @@ mod tests { assert_eq!(node_txn[2].input.len(), 1); let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap(); - assert_eq!(witness_script.len(), 133); //Spending an offered htlc output + assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid()); assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid); assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid); @@ -4766,7 +6093,7 @@ mod tests { // node[0] is going to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx, but this // time as two different claim transactions as we're going to time out the htlc given a high current height let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; + let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000); // Get the will-be-revoked local txn from node[0] let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); @@ -4776,10 +6103,18 @@ mod tests { { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time:
42, bits: 42, nonce: 42 }; - nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); - nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_2); + }, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 12); // ChannelManager : 2, ChannelMonitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) @@ -4807,15 +6142,15 @@ mod tests { witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len()); assert_eq!(witness_lens.len(), 3); assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC - assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC + assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC assert_eq!(node_txn[3].input.len(), 1); check_spends!(node_txn[3], chan_1.3.clone()); assert_eq!(node_txn[4].input.len(), 1); let witness_script = node_txn[4].input[0].witness.last().unwrap(); - assert_eq!(witness_script.len(), 133); //Spending an offered htlc output + assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid()); assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid); assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid); @@ -4826,1245 +6161,3290 @@ mod tests { } #[test] - fn test_htlc_ignore_latest_remote_commitment() { - // Test that HTLC transactions spending the latest remote commitment transaction are simply - // ignored if we cannot claim them. This originally tickled an invalid unwrap(). - let nodes = create_network(2); - create_announced_chan_between_nodes(&nodes, 0, 1); - - route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id); - { - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { - assert_eq!(flags & 0b10, 0b10); - }, - _ => panic!("Unexpected event"), - } - } + fn test_htlc_on_chain_success() { + // Test that in case of a unilateral close onchain, we detect the state of output thanks to + // ChainWatchInterface and pass the preimage backward accordingly. So here we test that ChannelManager is + // broadcasting the right event to other nodes in payment path. + // A --------------------> B ----------------------> C (preimage) + // First, C should claim the HTLC output via HTLC-Success when its own latest local + // commitment transaction was broadcast. + // Then, B should learn the preimage from said transactions, attempting to claim backwards + // towards A.
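// [Editor's aside, not part of the original patch: the "learn the preimage" step above works
// because an HTLC-Success (or other preimage-claim) witness published on chain exposes the
// payment preimage, and its SHA-256 is exactly the payment hash the upstream hop is holding the
// HTLC against. A minimal, self-contained sketch of that relation, assuming the rust-crypto
// Sha256/Digest API; the helper name is hypothetical.]
fn preimage_matches_hash(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
    use crypto::digest::Digest;
    use crypto::sha2::Sha256;
    let mut sha = Sha256::new();
    sha.input(preimage);
    let mut out = [0u8; 32];
    sha.result(&mut out);
    &out == payment_hash
}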
+ // B should be able to claim via preimage if A then broadcasts its local tx. + // Finally, when A sees B's latest local commitment transaction it should be able to claim + // the HTLC output via the preimage it learned (which, once confirmed, should generate a + // PaymentSent event). - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); + let nodes = create_network(3); - let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + // Rebalance the network a bit by relaying one payment through all the channels... + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + + let (our_payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + + // Broadcast legit commitment tx from C on B's chain + // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain + let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(commitment_tx.len(), 1); + check_spends!(commitment_tx[0], chan_2.3.clone()); + nodes[2].node.claim_funds(our_payment_preimage); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 2 (2 * HTLC-Success tx) + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn[1], commitment_tx[0]); + assert_eq!(node_txn[0], node_txn[2]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_eq!(node_txn[0].lock_time, 0); + + // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn}, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); {
} } => { - assert_eq!(flags & 0b10, 0b10); - }, - _ => panic!("Unexpected event"), - } + let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 1); + assert_eq!(added_monitors[0].0.txid, chan_1.3.txid()); + added_monitors.clear(); + } + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + }, + _ => panic!("Unexpected event"), + }; + { + // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate + // commitment transaction with a corresponding HTLC-Timeout transaction, as well as a + // timeout-claim of the output that nodes[2] just claimed via success. + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (timeout tx) * 2 (block-rescan) + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[0], node_txn[3]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_ne!(node_txn[0].lock_time, 0); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + check_spends!(node_txn[1], chan_2.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(node_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_ne!(node_txn[2].lock_time, 0); + node_txn.clear(); + } + + // Broadcast legit commitment tx from A on B's chain + // Broadcast preimage tx by B on offered output from A commitment tx on A's chain + let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_1.3.clone()); + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan) + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn[0], node_txn[2]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!(node_txn[0].lock_time, 0); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + check_spends!(node_txn[1], chan_1.3.clone()); + assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + // We don't bother to check that B can claim the HTLC output on its commitment tx here as + // we already checked the same situation with A. - // Duplicate the block_connected call since this may happen due to other listeners - // registering new transactions - nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { payment_preimage } => { + assert_eq!(payment_preimage, our_payment_preimage); + }, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (HTLC-Timeout tx) * 2 (block-rescan) + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[0], node_txn[3]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_ne!(node_txn[0].lock_time, 0); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + check_spends!(node_txn[1], chan_1.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(node_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_ne!(node_txn[2].lock_time, 0); } #[test] - fn test_force_close_fail_back() { - // Check which HTLCs are failed-backwards on channel force-closure - let mut nodes = create_network(3); - create_announced_chan_between_nodes(&nodes, 0, 1); - create_announced_chan_between_nodes(&nodes, 1, 2); - - let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap(); + fn test_htlc_on_chain_timeout() { + // Test that in case of a unilateral close onchain, we detect the state of output thanks to + // ChainWatchInterface and time out the HTLC backward accordingly. So here we test that ChannelManager is + // broadcasting the right event to other nodes in payment path.
+ // A ------------------> B ----------------------> C (timeout) + // B's commitment tx C's commitment tx + // \ \ + // B's HTLC timeout tx B's timeout tx - let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + let nodes = create_network(3); - let mut payment_event = { - nodes[0].node.send_payment(route, our_payment_hash).unwrap(); - check_added_monitors!(nodes[0], 1); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; + // Rebalance the network a bit by relaying one payment through all the channels... + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + let (_payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; - let events_1 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::PendingHTLCsForwardable { .. } => { }, + // Broadcast legit commitment tx from C on B's chain + let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_2.3.clone()); + nodes[2].node.fail_htlc_backwards(&payment_hash, PaymentFailReason::PreimageUnknown); + { + let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 1); + added_monitors.clear(); + } + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(!update_fail_htlcs.is_empty()); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + }, _ => panic!("Unexpected event"), }; + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { ..
} } => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx) + assert_eq!(node_txn.len(), 1); + check_spends!(node_txn[0], chan_2.3.clone()); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71); - nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); - nodes[1].node.process_pending_htlc_forwards(); - - let mut events_2 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - payment_event = SendEvent::from_event(events_2.remove(0)); - assert_eq!(payment_event.msgs.len(), 1); - + // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain + // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backward accordingly + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200); + let timeout_tx; + { + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 8); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 6 (HTLC-Timeout tx, commitment tx, timeout tx) * 2 (block-rescan) + assert_eq!(node_txn[0], node_txn[5]); + assert_eq!(node_txn[1], node_txn[6]); + assert_eq!(node_txn[2], node_txn[7]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[1], chan_2.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71); + assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[3], chan_2.3.clone()); + check_spends!(node_txn[4], node_txn[3].clone()); + assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(node_txn[4].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + timeout_tx = node_txn[0].clone(); + node_txn.clear(); + } + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); check_added_monitors!(nodes[1], 1); - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); - check_added_monitors!(nodes[2], 1); - - // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous - // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC - // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). - - nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id); - let events_3 = nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - match events_3[0] { - Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { - assert_eq!(flags & 0b10, 0b10); - }, + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { ..
} } => {}, _ => panic!("Unexpected event"), } - - let tx = { - let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't - // have a use for it unless nodes[2] learns the preimage somehow, the funds will go - // back to nodes[1] upon timeout otherwise. - assert_eq!(node_txn.len(), 1); - node_txn.remove(0) + match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(!update_fail_htlcs.is_empty()); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + }, + _ => panic!("Unexpected event"), }; + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // Well... here we detect our own htlc_timeout_tx so no tx to be generated + assert_eq!(node_txn.len(), 0); - let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]); + // Broadcast legit commitment tx from B on A's chain + let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_1.3.clone()); - let events_4 = nodes[1].node.get_and_clear_pending_events(); - // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { - assert_eq!(flags & 0b10, 0b10); - }, + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {}, _ => panic!("Unexpected event"), } - - // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
- { - let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap(); - monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap() - .provide_payment_preimage(&our_payment_hash, &our_payment_preimage); - } - nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]); - let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 1); - assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid()); - assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success - assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success - - check_spends!(node_txn[0], tx); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 block-rescan + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[0], node_txn[3]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[1], chan_1.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71); + assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); } #[test] - fn test_unconf_chan() { - // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side - let nodes = create_network(2); + fn test_simple_commitment_revoked_fail_backward() { + // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx + // and fail backward accordingly. 
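// [Editor's aside, not part of the original patch: "resolution of output by justice tx" in the
// comment above means a punishment transaction whose every input spends an output of the revoked
// commitment transaction. A hedged, simplified form of that kind of assertion -- not necessarily
// how the check_spends! macro used throughout these tests is implemented; the helper name is
// hypothetical and Transaction is assumed to be bitcoin::blockdata::transaction::Transaction.]
fn spends_only(tx: &Transaction, prev: &Transaction) -> bool {
    let prev_txid = prev.txid();
    tx.input.iter().all(|txin| txin.previous_output.txid == prev_txid)
}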
+ + let nodes = create_network(3); + + // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let channel_state = nodes[0].node.channel_state.lock().unwrap(); - assert_eq!(channel_state.by_id.len(), 1); - assert_eq!(channel_state.short_to_id.len(), 1); - mem::drop(channel_state); + let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + // Get the will-be-revoked local txn from nodes[2] + let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + // Revoke the old state + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - let mut headers = Vec::new(); - let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - headers.push(header.clone()); - for _i in 2..100 { - header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - headers.push(header.clone()); - } - while !headers.is_empty() { - nodes[0].node.block_disconnected(&headers.pop().unwrap()); + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + check_added_monitors!(nodes[1], 1); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {}, + _ => panic!("Unexpected event"), } - { - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { - assert_eq!(flags & 0b10, 0b10); - }, - _ => panic!("Unexpected event"), - } + match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fail_htlcs.len(), 1); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { .. 
} => {}, + _ => panic!("Unexpected event"), + } + }, + _ => panic!("Unexpected event"), } - let channel_state = nodes[0].node.channel_state.lock().unwrap(); - assert_eq!(channel_state.by_id.len(), 0); - assert_eq!(channel_state.short_to_id.len(), 0); } - /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas - /// for claims/fails they are separated out. - fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) { - let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id()); - let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id()); + fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) { + // Test that if our counterparty broadcasts a revoked commitment transaction we fail all + // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest + // commitment transaction anymore. + // To do this, we have the peer which will broadcast a revoked commitment transaction send + // a number of update_fail/commitment_signed updates without ever sending the RAA in + // response to our commitment_signed. This is somewhat misbehavior-y, though not + // technically disallowed and we should probably handle it reasonably. + // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet + // failed/fulfilled backwards must be in at least one of the latest two remote commitment + // transactions: + // * Once we move it out of our holding cell/add it, we will immediately include it in a + // commitment_signed (implying it will be in the latest remote commitment transaction). + // * Once they remove it, we will send a (the first) commitment_signed without the HTLC, + // and once they revoke the previous commitment transaction (allowing us to send a new + // commitment_signed) we will be free to fail/fulfill the HTLC backwards. 
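// [Editor's aside, not part of the original patch: because the resulting PaymentFailed events can
// surface across several message rounds, the test below collects the failed payment hashes into a
// HashSet instead of asserting on a fixed order, then checks membership at the end. The pattern in
// miniature; the helper name is hypothetical.]
fn collect_failed_hashes(hashes: &[[u8; 32]]) -> std::collections::HashSet<[u8; 32]> {
    let mut failed = std::collections::HashSet::new();
    for hash in hashes {
        assert!(failed.insert(*hash)); // each payment hash should only be failed backwards once
    }
    failed
}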
+ let mut nodes = create_network(3); - let mut resp_1 = Vec::new(); - for msg in reestablish_1 { - resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap()); + // Create some initial channels + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + // Get the will-be-revoked local txn from nodes[2] + let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + // Revoke the old state + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let (_, first_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + + assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + // Drop the last RAA from 3 -> 2 + + assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + // Note that nodes[1] is in AwaitingRAA, so won't send a CS + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + + assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + // At this point first_payment_hash has dropped out of the latest two commitment + // transactions that nodes[1] is tracking... 
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + + // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting + // on nodes[2]'s RAA. + let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (_, fourth_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(route, fourth_payment_hash).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + check_added_monitors!(nodes[1], 0); + + if deliver_bs_raa { + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap(); + // One monitor for the new revocation preimage, one as we generate a commitment for + // nodes[0] to fail first_payment_hash backwards. + check_added_monitors!(nodes[1], 2); } - if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { - check_added_monitors!(node_b, 1); - } else { - check_added_monitors!(node_b, 0); + + let mut failed_htlcs = HashSet::new(); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert_eq!(*payment_hash, fourth_payment_hash); + }, + _ => panic!("Unexpected event"), } - let mut resp_2 = Vec::new(); - for msg in reestablish_2 { - resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap()); + if !deliver_bs_raa { + // If we delivered the RAA already then we already failed first_payment_hash backwards. + check_added_monitors!(nodes[1], 1); } - if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { - check_added_monitors!(node_a, 1); - } else { - check_added_monitors!(node_a, 0); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 }); + match events[if deliver_bs_raa { 2 } else { 0 }] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {}, + _ => panic!("Unexpected event"), + } + if deliver_bs_raa { + match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert_eq!(nodes[2].node.get_our_node_id(), *node_id); + assert_eq!(update_add_htlcs.len(), 1); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + }, + _ => panic!("Unexpected event"), + } } + // Due to the way backwards-failing occurs we do the updates in two steps. 
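// [Editor's aside, not part of the original patch: the manual exchange just below is one full
// commitment-update handshake, driven message by message rather than via the
// commitment_signed_dance! macro. A sketch of the order in which the test delivers the messages;
// the enum and function are hypothetical.]
#[derive(Debug)]
enum HandshakeMsg { UpdateFailHTLC, CommitmentSigned, RevokeAndACK }
fn expected_handshake() -> Vec<(&'static str, HandshakeMsg)> {
    vec![
        ("nodes[1] -> nodes[0]", HandshakeMsg::UpdateFailHTLC),   // relay the failure backwards
        ("nodes[1] -> nodes[0]", HandshakeMsg::CommitmentSigned), // commit to the updated state
        ("nodes[0] -> nodes[1]", HandshakeMsg::RevokeAndACK),     // revoke nodes[0]'s old state
        ("nodes[0] -> nodes[1]", HandshakeMsg::CommitmentSigned), // nodes[0]'s commitment for the same change
        ("nodes[1] -> nodes[0]", HandshakeMsg::RevokeAndACK),     // revoke nodes[1]'s old state
    ]
}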
+ let updates = match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fail_htlcs.len(), 1); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); - // We dont yet support both needing updates, as that would require a different commitment dance: - assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) || - (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0)); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[1], 1); + let bs_second_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[0], 1); - for chan_msgs in resp_1.drain(..) 
{ - if pre_all_htlcs { - let a = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()); - let _announcement_sigs_opt = a.unwrap(); - //TODO: Test announcement_sigs re-sending when we've implemented it - } else { - assert!(chan_msgs.0.is_none()); - } - if pending_raa.0 { - assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); - assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none()); - check_added_monitors!(node_a, 1); - } else { - assert!(chan_msgs.1.is_none()); - } - if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { - let commitment_update = chan_msgs.2.unwrap(); - if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed - assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize); - } else { - assert!(commitment_update.update_add_htlcs.is_empty()); - } - assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0); - assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); - assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); - for update_add in commitment_update.update_add_htlcs { - node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap(); - } - for update_fulfill in commitment_update.update_fulfill_htlcs { - node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap(); + if !deliver_bs_raa { + // If we delivered B's RAA we got an unknown preimage error, not something + // that we should update our routing table for. + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } } - for update_fail in commitment_update.update_fail_htlcs { - node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert!(failed_htlcs.insert(payment_hash.0)); + }, + _ => panic!("Unexpected event"), } - if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed - commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false); - } else { - let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); - check_added_monitors!(node_a, 1); - assert!(as_commitment_signed.is_none()); - assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(node_b, 1); - } - } else { - assert!(chan_msgs.2.is_none()); - } - } + bs_second_update + }, + _ => panic!("Unexpected event"), + }; - for chan_msgs in resp_2.drain(..)
{ - if pre_all_htlcs { - let _announcement_sigs_opt = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); - //TODO: Test announcement_sigs re-sending when we've implemented it - } else { - assert!(chan_msgs.0.is_none()); - } - if pending_raa.1 { - assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); - assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none()); - check_added_monitors!(node_b, 1); - } else { - assert!(chan_msgs.1.is_none()); - } - if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { - let commitment_update = chan_msgs.2.unwrap(); - if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed - assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize); - } - assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0); - assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); - assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); - for update_add in commitment_update.update_add_htlcs { - node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap(); - } - for update_fulfill in commitment_update.update_fulfill_htlcs { - node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap(); - } - for update_fail in commitment_update.update_fail_htlcs { - node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap(); - } + assert!(updates.update_add_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 2); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); - if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed - commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false); - } else { - let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); - check_added_monitors!(node_b, 1); - assert!(bs_commitment_signed.is_none()); - assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(node_a, 1); - } - } else { - assert!(chan_msgs.2.is_none()); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for event in events { + match event { + MessageSendEvent::PaymentFailureNetworkUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), } } - } - - #[test] - fn test_simple_peer_disconnect() { - // Test that we can reconnect when there are no lost messages - let nodes = create_network(3); - create_announced_chan_between_nodes(&nodes, 0, 1); - create_announced_chan_between_nodes(&nodes, 1, 2); - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1); - - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert!(failed_htlcs.insert(payment_hash.0)); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert!(failed_htlcs.insert(payment_hash.0)); + }, + _ => panic!("Unexpected event"), + } - let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + assert!(failed_htlcs.contains(&first_payment_hash.0)); + assert!(failed_htlcs.contains(&second_payment_hash.0)); + assert!(failed_htlcs.contains(&third_payment_hash.0)); + } - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + #[test] + fn test_commitment_revoked_fail_backward_exhaustive() { + do_test_commitment_revoked_fail_backward_exhaustive(false); + do_test_commitment_revoked_fail_backward_exhaustive(true); + } - claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3); - fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5); + #[test] + fn test_htlc_ignore_latest_remote_commitment() { + // Test that HTLC transactions spending the latest remote commitment transaction are simply + // ignored if we cannot claim them. This originally tickled an invalid unwrap(). 
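// [Editor's aside, not part of the original patch: the `flags & 0b10` assertions in this test and
// in the force-close tests below check the channel_update "disable" bit, which a node sets when it
// broadcasts an update for a channel it now considers unusable. A hedged restatement of that
// check; the helper name is hypothetical and the flags width is assumed to be u16 as in BOLT 7.]
fn channel_update_disabled(flags: u16) -> bool {
    (flags & 0b10) == 0b10
}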
+ let nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); + route_payment(&nodes[0], &[&nodes[1]], 10000000); + nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id); { - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); match events[0] { - Event::PaymentSent { payment_preimage } => { - assert_eq!(payment_preimage, payment_preimage_3); + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); }, _ => panic!("Unexpected event"), } - match events[1] { - Event::PaymentFailed { payment_hash, rejected_by_dest } => { - assert_eq!(payment_hash, payment_hash_5); - assert!(rejected_by_dest); + } + + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 2); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + + { + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); }, _ => panic!("Unexpected event"), } } - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); + // Duplicate the block_connected call since this may happen due to other listeners + // registering new transactions + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); } - fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { - // Test that we can reconnect when in-flight HTLC updates get dropped - let mut nodes = create_network(2); - if messages_delivered == 0 { - create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); - // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect) - } else { - create_announced_chan_between_nodes(&nodes, 0, 1); - } + #[test] + fn test_force_close_fail_back() { + // Check which HTLCs are failed-backwards on channel force-closure + let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); - let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); - let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap(); + + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + + let mut payment_event = { + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + 
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + payment_event = SendEvent::from_event(events_2.remove(0)); + assert_eq!(payment_event.msgs.len(), 1); + + check_added_monitors!(nodes[1], 1); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[2], 1); + let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + + // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous + // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC + // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). + + nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id); + let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + + let tx = { + let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); + // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't + // have a use for it unless nodes[2] learns the preimage somehow, the funds will go + // back to nodes[1] upon timeout otherwise. + assert_eq!(node_txn.len(), 1); + node_txn.remove(0) + }; + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]); + + let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); + // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! + assert_eq!(events_4.len(), 1); + match events_4[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + + // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
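// [Editor's aside, not part of the original patch: the "Must be an HTLC-Success" assertions just
// below identify that transaction by two observable properties: it is not timelocked
// (lock_time == 0, unlike an HTLC-Timeout claim) and its single input carries the 5-element
// HTLC-Success witness. A hedged restatement, assuming Transaction is in scope; the helper name
// is hypothetical.]
fn looks_like_htlc_success(tx: &Transaction) -> bool {
    tx.lock_time == 0 && tx.input.len() == 1 && tx.input[0].witness.len() == 5
}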
+ { + let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap(); + monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap() + .provide_payment_preimage(&our_payment_hash, &our_payment_preimage); + } + nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]); + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid()); + assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success + assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success + + check_spends!(node_txn[0], tx); + } + + #[test] + fn test_unconf_chan() { + // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side + let nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let channel_state = nodes[0].node.channel_state.lock().unwrap(); + assert_eq!(channel_state.by_id.len(), 1); + assert_eq!(channel_state.short_to_id.len(), 1); + mem::drop(channel_state); + + let mut headers = Vec::new(); + let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + headers.push(header.clone()); + for _i in 2..100 { + header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + headers.push(header.clone()); + } + while !headers.is_empty() { + nodes[0].node.block_disconnected(&headers.pop().unwrap()); + } + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + } + let channel_state = nodes[0].node.channel_state.lock().unwrap(); + assert_eq!(channel_state.by_id.len(), 0); + assert_eq!(channel_state.short_to_id.len(), 0); + } + + macro_rules! get_chan_reestablish_msgs { + ($src_node: expr, $dst_node: expr) => { + { + let mut res = Vec::with_capacity(1); + for msg in $src_node.node.get_and_clear_pending_msg_events() { + if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + res.push(msg.clone()); + } else { + panic!("Unexpected event") + } + } + res + } + } + } + + macro_rules! 
handle_chan_reestablish_msgs { + ($src_node: expr, $dst_node: expr) => { + { + let msg_events = $src_node.node.get_and_clear_pending_msg_events(); + let mut idx = 0; + let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + Some(msg.clone()) + } else { + None + }; + + let mut revoke_and_ack = None; + let mut commitment_update = None; + let order = if let Some(ev) = msg_events.get(idx) { + idx += 1; + match ev { + &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + revoke_and_ack = Some(msg.clone()); + RAACommitmentOrder::RevokeAndACKFirst + }, + &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + commitment_update = Some(updates.clone()); + RAACommitmentOrder::CommitmentFirst + }, + _ => panic!("Unexpected event"), + } + } else { + RAACommitmentOrder::CommitmentFirst + }; + + if let Some(ev) = msg_events.get(idx) { + match ev { + &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + assert!(revoke_and_ack.is_none()); + revoke_and_ack = Some(msg.clone()); + }, + &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + assert!(commitment_update.is_none()); + commitment_update = Some(updates.clone()); + }, + _ => panic!("Unexpected event"), + } + } + + (funding_locked, revoke_and_ack, commitment_update, order) + } + } + } + + /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas + /// for claims/fails they are separated out. 
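+ /// Each tuple is (node_a value, node_b value), in argument order: send_funding_locked,
+ /// pending_htlc_adds, pending_htlc_claims, pending_cell_htlc_claims, pending_cell_htlc_fails,
+ /// pending_raa. A pending_htlc_adds entry of -1 denotes a bare response commitment_signed with no
+ /// update_add_htlcs expected. For example,
+ /// reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false))
+ /// reconnects a pair of nodes with nothing left to re-send on either side.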
+ fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) { + node_a.node.peer_connected(&node_b.node.get_our_node_id()); + let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); + node_b.node.peer_connected(&node_a.node.get_our_node_id()); + let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); + + if send_funding_locked.0 { + // If a expects a funding_locked, it better not think it has received a revoke_and_ack + // from b + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.1 { + // If b expects a funding_locked, it better not think it has received a revoke_and_ack + // from a + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.0 || send_funding_locked.1 { + // If we expect any funding_locked's, both sides better have set + // next_local_commitment_number to 1 + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + } + + let mut resp_1 = Vec::new(); + for msg in reestablish_1 { + node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap(); + resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a)); + } + if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { + check_added_monitors!(node_b, 1); + } else { + check_added_monitors!(node_b, 0); + } + + let mut resp_2 = Vec::new(); + for msg in reestablish_2 { + node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap(); + resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b)); + } + if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { + check_added_monitors!(node_a, 1); + } else { + check_added_monitors!(node_a, 0); + } + + // We dont yet support both needing updates, as that would require a different commitment dance: + assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) || + (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0)); + + for chan_msgs in resp_1.drain(..) { + if send_funding_locked.0 { + node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); + let announcement_event = node_a.node.get_and_clear_pending_msg_events(); + if !announcement_event.is_empty() { + assert_eq!(announcement_event.len(), 1); + if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = announcement_event[0] { + //TODO: Test announcement_sigs re-sending + } else { panic!("Unexpected event!"); } + } + } else { + assert!(chan_msgs.0.is_none()); + } + if pending_raa.0 { + assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); + node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap(); + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(node_a, 1); + } else { + assert!(chan_msgs.1.is_none()); + } + if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { + let commitment_update = chan_msgs.2.unwrap(); + if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed + assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize); + } else { + assert!(commitment_update.update_add_htlcs.is_empty()); + } + assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0); + assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); + assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + for update_add in commitment_update.update_add_htlcs { + node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap(); + } + for update_fulfill in commitment_update.update_fulfill_htlcs { + node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap(); + } + for update_fail in commitment_update.update_fail_htlcs { + node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap(); + } + + if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed + commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false); + } else { + node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(node_a, 1); + let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(node_b, 1); + } + } else { + assert!(chan_msgs.2.is_none()); + } + } + + for chan_msgs in resp_2.drain(..) { + if send_funding_locked.1 { + node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap(); + let announcement_event = node_b.node.get_and_clear_pending_msg_events(); + if !announcement_event.is_empty() { + assert_eq!(announcement_event.len(), 1); + if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = announcement_event[0] { + //TODO: Test announcement_sigs re-sending + } else { panic!("Unexpected event!"); } + } + } else { + assert!(chan_msgs.0.is_none()); + } + if pending_raa.1 { + assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); + node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap(); + assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(node_b, 1); + } else { + assert!(chan_msgs.1.is_none()); + } + if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { + let commitment_update = chan_msgs.2.unwrap(); + if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed + assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize); + } + assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0); + assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0); + assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + for update_add in commitment_update.update_add_htlcs { + node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap(); + } + for update_fulfill in commitment_update.update_fulfill_htlcs { + node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap(); + } + for update_fail in commitment_update.update_fail_htlcs { + node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap(); + } + + if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed + commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false); + } else { + node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(node_b, 1); + let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(node_a, 1); + } + } else { + assert!(chan_msgs.2.is_none()); + } + } + } + + #[test] + fn test_simple_peer_disconnect() { + // Test that we can reconnect when there are no lost messages + let nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2); + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1); + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + let payment_preimage_3 = 
route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3); + fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5); + + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); + { + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentSent { payment_preimage } => { + assert_eq!(payment_preimage, payment_preimage_3); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentFailed { payment_hash, rejected_by_dest } => { + assert_eq!(payment_hash, payment_hash_5); + assert!(rejected_by_dest); + }, + _ => panic!("Unexpected event"), + } + } + + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); + fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); + } + + fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { + // Test that we can reconnect when in-flight HTLC updates get dropped + let mut nodes = create_network(2); + if messages_delivered == 0 { + create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); + // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect) + } else { + create_announced_chan_between_nodes(&nodes, 0, 1); + } + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + let payment_event = { + nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); + + if messages_delivered < 2 { + // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! 
+ } else { + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + if messages_delivered >= 3 { + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[1], 1); + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + + if messages_delivered >= 4 { + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 5 { + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap(); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 6 { + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + } + } + } + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + if messages_delivered < 3 { + // Even if the funding_locked messages get exchanged, as long as nothing further was + // received on either side, both sides will need to resend them. + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 3 { + // nodes[0] still wants its RAA + commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false)); + } else if messages_delivered == 4 { + // nodes[0] still wants its commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 5 { + // nodes[1] still wants its final RAA + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true)); + } else if messages_delivered == 6 { + // Everything was delivered... + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_1, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + nodes[1].node.claim_funds(payment_preimage_1); + check_added_monitors!(nodes[1], 1); + + let events_3 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_3.len(), 1); + let (update_fulfill_htlc, commitment_signed) = match events_3[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), + }; + + if messages_delivered >= 1 { + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap(); + + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(payment_preimage_1, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + if messages_delivered >= 2 { + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + if messages_delivered >= 3 { + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 4 { + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap(); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 5 { + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + } + } + } + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + if messages_delivered < 2 { + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); + //TODO: Deduplicate PaymentSent events, then enable this if: + //if messages_delivered < 1 { + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + 
match events_4[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(payment_preimage_1, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + //} + } else if messages_delivered == 2 { + // nodes[0] still wants its RAA + commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true)); + } else if messages_delivered == 3 { + // nodes[0] still wants its commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 4 { + // nodes[1] still wants its final RAA + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false)); + } else if messages_delivered == 5 { + // Everything was delivered... + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + // Channel should still work fine... + let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_drop_messages_peer_disconnect_a() { + do_test_drop_messages_peer_disconnect(0); + do_test_drop_messages_peer_disconnect(1); + do_test_drop_messages_peer_disconnect(2); + do_test_drop_messages_peer_disconnect(3); + } + + #[test] + fn test_drop_messages_peer_disconnect_b() { + do_test_drop_messages_peer_disconnect(4); + do_test_drop_messages_peer_disconnect(5); + do_test_drop_messages_peer_disconnect(6); + } + + #[test] + fn test_funding_peer_disconnect() { + // Test that we can lock in our funding tx while disconnected + let nodes = create_network(2); + let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001); + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version); + let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + + reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version); + let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 2); + match events_2[0] { + MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + match events_2[1] { + MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + // TODO: We shouldn't need to manually pass list_usable_chanels here 
once we support + // rebroadcasting announcement_signatures upon reconnect. + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + } + + #[test] + fn test_drop_messages_peer_disconnect_dual_htlc() { + // Test that we can handle reconnecting when both sides of a channel have pending + // commitment_updates when we disconnect. + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + + // Now try to send a second payment which will fail to send + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + + nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + MessageSendEvent::UpdateHTLCs { .. } => {}, + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.claim_funds(payment_preimage_1)); + check_added_monitors!(nodes[1], 1); + + let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); + let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + 
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + assert!(bs_resp.1.is_none()); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst); + + assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1); + assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap(); + let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(as_commitment_signed.update_add_htlcs.is_empty()); + assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_signed.update_fail_htlcs.is_empty()); + assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_signed.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap(); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap(); + let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + + let events_4 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PaymentReceived { ref payment_hash, amt: _ } => { + assert_eq!(payment_hash_2, *payment_hash); + }, + _ => panic!("Unexpected event"), + } + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_simple_monitor_permanent_update_fail() { + // Test that we handle a simple permanent monitor update failure + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 2); + match events_1[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + }; + match events_1[1] { + MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()), + _ => panic!("Unexpected event"), + }; + + // TODO: Once we hit the chain with the failure transaction we should check that we get a + // PaymentFailed event + + assert_eq!(nodes[0].node.list_channels().len(), 0); + } + + fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { + // Test that we can recover from a simple temporary monitor update failure optionally with + // a disconnect in between + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + if disconnect { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + let payment_event = SendEvent::from_event(events_2.pop().unwrap()); + 
assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + + let events_3 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_1, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); + + // Now set it to failed again... + let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + if disconnect { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } - let payment_event = { - nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap(); + // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let events_5 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + + // TODO: Once we hit the chain with the failure transaction we should check that we get a + // PaymentFailed event + + assert_eq!(nodes[0].node.list_channels().len(), 0); + } + + #[test] + fn test_simple_monitor_temporary_update_fail() { + do_test_simple_monitor_temporary_update_fail(false); + do_test_simple_monitor_temporary_update_fail(true); + } + + fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { + let disconnect_flags = 8 | 16; + + // Test that we can recover from a temporary monitor update failure with some in-flight + // HTLCs going on at the same time potentially with some disconnection thrown in. + // * First we route a payment, then get a temporary monitor update failure when trying to + // route a second payment. We then claim the first payment. + // * If disconnect_count is set, we will disconnect at this point (which is likely as + // TemporaryFailure likely indicates net disconnect which resulted in failing to update + // the ChannelMonitor on a watchtower). + // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment + // immediately, otherwise we wait sconnect and deliver them via the reconnect + // channel_reestablish processing (ie disconnect_count & 16 makes no sense if + // disconnect_count & !disconnect_flags is 0). 
+ // * We then update the channel monitor, reconnecting if disconnect_count is set and walk + // through message sending, potentially disconnect/reconnecting multiple times based on + // disconnect_count, to get the update_fulfill_htlc through. + // * We then walk through more message exchanges to get the original update_add_htlc + // through, swapping message ordering based on disconnect_count & 8 and optionally + // disconnect/reconnecting based on disconnect_count. + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + + // Now try to send a second payment which will fail to send + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] + // but nodes[0] won't respond since it is frozen. + assert!(nodes[1].node.claim_funds(payment_preimage_1)); + check_added_monitors!(nodes[1], 1); + let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + + if (disconnect_count & 16) == 0 { + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { + assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); + } else { panic!(); } + } + + (update_fulfill_htlcs[0].clone(), commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), + }; + + if disconnect_count & !disconnect_flags > 0 { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + } + + // Now fix monitor updating... + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + macro_rules! 
disconnect_reconnect_peers { () => { { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + (reestablish_1, reestablish_2, as_resp, bs_resp) + } } } + + let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 { + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + check_added_monitors!(nodes[0], 0); + let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + check_added_monitors!(nodes[1], 0); + let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + assert!(bs_resp.1.is_none()); + if (disconnect_count & 16) == 0 { + assert!(bs_resp.2.is_none()); + + assert!(as_resp.1.is_some()); + assert!(as_resp.2.is_some()); + assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst); + } else { + assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); + assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]); + assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); + + assert!(as_resp.1.is_none()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) 
passes + check_added_monitors!(nodes[0], 1); + + as_resp.1 = Some(as_resp_raa); + bs_resp.2 = None; + } + + if disconnect_count & !disconnect_flags > 1 { + let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!(); + + if (disconnect_count & 16) == 0 { + assert!(reestablish_1 == second_reestablish_1); + assert!(reestablish_2 == second_reestablish_2); + } + assert!(as_resp == second_as_resp); + assert!(bs_resp == second_bs_resp); + } + + (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap()) + } else { + let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_4.len(), 2); + (SendEvent::from_event(events_4.remove(0)), match events_4[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + msg.clone() + }, + _ => panic!("Unexpected event"), + }) + }; + + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); + + if disconnect_count & !disconnect_flags > 2 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.is_none()); + } + + let as_commitment_update; + let bs_second_commitment_update; + + macro_rules! handle_bs_raa { () => { + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(as_commitment_update.update_add_htlcs.is_empty()); + assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_update.update_fee.is_none()); check_added_monitors!(nodes[0], 1); + } } + + macro_rules! 
handle_initial_raa { () => { + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap(); + bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + } } + + if (disconnect_count & 8) == 0 { + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } else { + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); - let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); + assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + } - if messages_delivered < 2 { - // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! 
- } else { - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); - check_added_monitors!(nodes[1], 1); + handle_bs_raa!(); - if messages_delivered >= 3 { - assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[0], 1); + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); - if messages_delivered >= 4 { - let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap(); - assert!(as_commitment_signed.is_none()); - check_added_monitors!(nodes[0], 1); + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); - if messages_delivered >= 5 { - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[1], 1); - } - } + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); } } - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - if messages_delivered < 2 { - // Even if the funding_locked messages get exchanged, as long as nothing further was - // received on either side, both sides will need to resend them. - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false)); - } else if messages_delivered == 2 { - // nodes[0] still wants its RAA + commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false)); - } else if messages_delivered == 3 { - // nodes[0] still wants its commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false)); - } else if messages_delivered == 4 { - // nodes[1] still wants its final RAA - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true)); - } else if messages_delivered == 5 { - // Everything was delivered... - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - } + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap(); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); - let events_1 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::PendingHTLCsForwardable { .. 
} => { }, - _ => panic!("Unexpected event"), - }; + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap(); + let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); - nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); - nodes[1].node.process_pending_htlc_forwards(); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); - let events_2 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - match events_2[0] { + expect_pending_htlcs_forwardable!(nodes[1]); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { Event::PaymentReceived { ref payment_hash, amt } => { - assert_eq!(payment_hash_1, *payment_hash); + assert_eq!(payment_hash_2, *payment_hash); assert_eq!(amt, 1000000); }, _ => panic!("Unexpected event"), } - nodes[1].node.claim_funds(payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_monitor_temporary_update_fail_a() { + do_test_monitor_temporary_update_fail(0); + do_test_monitor_temporary_update_fail(1); + do_test_monitor_temporary_update_fail(2); + do_test_monitor_temporary_update_fail(3); + do_test_monitor_temporary_update_fail(4); + do_test_monitor_temporary_update_fail(5); + } + + #[test] + fn test_monitor_temporary_update_fail_b() { + do_test_monitor_temporary_update_fail(2 | 8); + do_test_monitor_temporary_update_fail(3 | 8); + do_test_monitor_temporary_update_fail(4 | 8); + do_test_monitor_temporary_update_fail(5 | 8); + } + + #[test] + fn test_monitor_temporary_update_fail_c() { + do_test_monitor_temporary_update_fail(1 | 16); + do_test_monitor_temporary_update_fail(2 | 16); + do_test_monitor_temporary_update_fail(3 | 16); + do_test_monitor_temporary_update_fail(2 | 8 | 16); + do_test_monitor_temporary_update_fail(3 | 8 | 16); + } + + #[test] + fn test_monitor_update_fail_cs() { + // Tests handling of a monitor update failure when processing an incoming commitment_signed + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + + let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = 
Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let events_3 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - let (update_fulfill_htlc, commitment_signed) = match events_3[0] { - Event::UpdateHTLCs { ref node_id, ref updates } => { + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); + let responses = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(responses.len(), 2); + + match responses[0] { + MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap(); + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + match responses[1] { + MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => { assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone()) + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); }, _ => panic!("Unexpected event"), - }; + } - if messages_delivered >= 1 { - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap(); + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(payment_preimage_1, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } + let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap(); + check_added_monitors!(nodes[1], 1); - if messages_delivered >= 2 { - let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); - check_added_monitors!(nodes[0], 1); + let mut events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); - if messages_delivered >= 3 { - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[1], 1); + events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { payment_hash, amt } => { + assert_eq!(payment_hash, our_payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + }; - if messages_delivered >= 4 { - let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); - assert!(bs_commitment_signed.is_none()); - check_added_monitors!(nodes[1], 1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + } - if messages_delivered >= 5 { - assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[0], 1); - } - } - } - } - } + fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { + // Tests handling of a monitor update failure when processing an incoming RAA + let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - if messages_delivered < 2 { - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); - //TODO: Deduplicate PaymentSent events, then enable this if: - //if messages_delivered < 1 { - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(payment_preimage_1, *payment_preimage); - }, - _ => panic!("Unexpected event"), - } - //} - } else if messages_delivered == 2 { - // nodes[0] still wants its RAA + commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true)); - } else if messages_delivered == 3 { - // nodes[0] still wants its commitment_signed - reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false)); - } else if messages_delivered == 4 { - // nodes[1] still wants its final RAA - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false)); - } else if messages_delivered == 5 { - // Everything was delivered... - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - } + // Rebalance a bit so that we can send backwards from 2 to 1. + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + // Route a first payment that we'll fail backwards + let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); - // Channel should still work fine... 
- let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); - } + // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA + assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); - #[test] - fn test_drop_messages_peer_disconnect_a() { - do_test_drop_messages_peer_disconnect(0); - do_test_drop_messages_peer_disconnect(1); - do_test_drop_messages_peer_disconnect(2); - } + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); - #[test] - fn test_drop_messages_peer_disconnect_b() { - do_test_drop_messages_peer_disconnect(3); - do_test_drop_messages_peer_disconnect(4); - do_test_drop_messages_peer_disconnect(5); - } + let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + check_added_monitors!(nodes[0], 0); - #[test] - fn test_funding_peer_disconnect() { - // Test that we can lock in our funding tx while disconnected - let nodes = create_network(2); - let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001); + // While the second channel is AwaitingRAA, forward a second payment to get it into the + // holding cell. + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(route, payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 1); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); - confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version); - let events_1 = nodes[0].node.get_and_clear_pending_events(); + let events_1 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_1.len(), 1); match events_1[0] { - Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - assert!(announcement_sigs.is_none()); - }, + Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Now fail monitor updating. 
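+ // (Throughout these tests, chan_monitor.update_ret is the knob that decides whether the next
+ // ChannelMonitor update succeeds: storing Err(TemporaryFailure) makes the next update call fail,
+ // simulating e.g. a temporarily unreachable watchtower, while storing Ok(()) again and calling
+ // test_restore_channel_monitor() lets the node catch back up.)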
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + + // Attempt to forward a third payment but fail due to the second channel being unavailable + // for forwarding. + + let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(route, payment_hash_3).unwrap(); + check_added_monitors!(nodes[0], 1); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel + send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); + check_added_monitors!(nodes[1], 0); + + let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + match events_2.remove(0) { + MessageSendEvent::UpdateHTLCs { node_id, updates } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events[0] { + assert_eq!(payment_hash, payment_hash_3); + assert!(!rejected_by_dest); + } else { panic!("Unexpected event!"); } + }, + _ => panic!("Unexpected event type!"), + }; + + let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { + // Try to route another payment backwards from 2 to make sure 1 holds off on responding + let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[2].node.send_payment(route, payment_hash_4).unwrap(); + check_added_monitors!(nodes[2], 1); + + send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { + assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); + } else { panic!(); } + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + 
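+ // nodes[1] has accepted the update_add/commitment_signed from nodes[2] but cannot respond yet:
+ // generating the RevokeAndACK would itself require a monitor update, and monitor updates are
+ // still failing, so the response stays queued until the monitor is restored below.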
assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + (Some(payment_preimage_4), Some(payment_hash_4)) + } else { (None, None) }; + + // Restore monitor updating, ensuring we immediately get a fail-back update and a + // update_add update. + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 2); + + let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); + if test_ignore_second_cs { + assert_eq!(events_3.len(), 3); + } else { + assert_eq!(events_3.len(), 2); + } + + // Note that the ordering of the events for different nodes is non-prescriptive, though the + // ordering of the two events that both go to nodes[2] have to stay in the same order. + let messages_a = match events_3.pop().unwrap() { + MessageSendEvent::UpdateHTLCs { node_id, mut updates } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + (updates.update_fail_htlcs.remove(0), updates.commitment_signed) + }, + _ => panic!("Unexpected event type!"), + }; + let raa = if test_ignore_second_cs { + match events_3.remove(1) { + MessageSendEvent::SendRevokeAndACK { node_id, msg } => { + assert_eq!(node_id, nodes[2].node.get_our_node_id()); + Some(msg.clone()) + }, + _ => panic!("Unexpected event"), + } + } else { None }; + let send_event_b = SendEvent::from_event(events_3.remove(0)); + assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id()); + + // Now deliver the new messages... + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false); + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events_4[0] { + assert_eq!(payment_hash, payment_hash_1); + assert!(rejected_by_dest); + } else { panic!("Unexpected event!"); } + + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap(); + if test_ignore_second_cs { + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(bs_cs.update_add_htlcs.is_empty()); + assert!(bs_cs.update_fail_htlcs.is_empty()); + assert!(bs_cs.update_fail_malformed_htlcs.is_empty()); + assert!(bs_cs.update_fulfill_htlcs.is_empty()); + assert!(bs_cs.update_fee.is_none()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[1], 1); + let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + assert!(as_cs.update_add_htlcs.is_empty()); + assert!(as_cs.update_fail_htlcs.is_empty()); + assert!(as_cs.update_fail_malformed_htlcs.is_empty()); + assert!(as_cs.update_fulfill_htlcs.is_empty()); + assert!(as_cs.update_fee.is_none()); + + 
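+ // Each side now owes the other a bare commitment_signed (no HTLC updates attached). The
+ // exchange below delivers both, collects the resulting revoke_and_ack messages, and checks
+ // that neither node has anything further to send, completing the reordered delivery that
+ // test_ignore_second_cs forced.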
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } else { + commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false); } - confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version); - let events_2 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - match events_2[0] { - Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - assert!(announcement_sigs.is_none()); - }, + let events_5 = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), - } + }; - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[2].node.process_pending_htlc_forwards(); - // TODO: We shouldn't need to manually pass list_usable_chanels here once we support - // rebroadcasting announcement_signatures upon reconnect. + let events_6 = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events_6.len(), 1); + match events_6[0] { + Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, + _ => panic!("Unexpected event"), + }; - let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); - let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); - } + if test_ignore_second_cs { + let events_7 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_7.len(), 1); + match events_7[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; - #[test] - fn test_drop_messages_peer_disconnect_dual_htlc() { - // Test that we can handle reconnecting when both sides of a channel have pending - // commitment_updates when we disconnect. 
- let mut nodes = create_network(2); - create_announced_chan_between_nodes(&nodes, 0, 1); + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 1); - let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + send_event = SendEvent::from_node(&nodes[1]); + assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(send_event.msgs.len(), 1); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false); - // Now try to send a second payment which will fail to send - let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); - let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + let events_8 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_8.len(), 1); + match events_8[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; - nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap(); - check_added_monitors!(nodes[0], 1); + nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[0].node.process_pending_htlc_forwards(); - let events_1 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::UpdateHTLCs { .. } => {}, - _ => panic!("Unexpected event"), + let events_9 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_9.len(), 1); + match events_9[0] { + Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()), + _ => panic!("Unexpected event"), + }; + claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap()); } - assert!(nodes[1].node.claim_funds(payment_preimage_1)); - check_added_monitors!(nodes[1], 1); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); + } - let events_2 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - match events_2[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); + #[test] + fn test_monitor_update_fail_raa() { + do_test_monitor_update_fail_raa(false); + do_test_monitor_update_fail_raa(true); + } - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); - let events_3 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(*payment_preimage, payment_preimage_1); - }, - _ => panic!("Unexpected event"), - } + #[test] + fn test_monitor_update_fail_reestablish() { + // Simple test for message retransmission after monitor update failure on + // channel_reestablish generating a monitor update (which comes from freeing holding cell + // HTLCs). 
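+ // (The fulfill claimed by nodes[2] below reaches nodes[1] while its channel to nodes[0] is
+ // disconnected, so the onward update_fulfill sits in nodes[1]'s holding cell; freeing it during
+ // channel_reestablish is the monitor update we force to fail.)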
+ let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); - let (_, commitment_update) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); - assert!(commitment_update.is_none()); - check_added_monitors!(nodes[0], 1); - }, - _ => panic!("Unexpected event"), - } + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); - assert_eq!(reestablish_1.len(), 1); - let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - assert_eq!(reestablish_2.len(), 1); - - let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); - let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - assert!(as_resp.0.is_none()); - assert!(bs_resp.0.is_none()); + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - assert!(bs_resp.1.is_none()); - assert!(bs_resp.2.is_none()); + let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); - assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1); - assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty()); - assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); - assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); - assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap(); - let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap(); - assert!(bs_commitment_signed.is_none()); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = 
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } check_added_monitors!(nodes[1], 1); - let bs_second_commitment_signed = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap().unwrap(); - assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); - assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); - assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); - assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty()); - assert!(bs_second_commitment_signed.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - let as_commitment_signed = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); - assert!(as_commitment_signed.update_add_htlcs.is_empty()); - assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); - assert!(as_commitment_signed.update_fail_htlcs.is_empty()); - assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty()); - assert!(as_commitment_signed.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - let (as_revoke_and_ack, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap(); - assert!(as_second_commitment_signed.is_none()); - check_added_monitors!(nodes[0], 1); + assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id())); + assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); - let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap(); - assert!(bs_third_commitment_signed.is_none()); - check_added_monitors!(nodes[1], 1); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap(); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let events_4 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 1); - match events_4[0] { - Event::PendingHTLCsForwardable { .. 
} => { }, - _ => panic!("Unexpected event"), - }; + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); - nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); - nodes[1].node.process_pending_htlc_forwards(); + updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); - let events_5 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_5.len(), 1); - match events_5[0] { - Event::PaymentReceived { ref payment_hash, amt: _ } => { - assert_eq!(payment_hash_2, *payment_hash); - }, + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage), _ => panic!("Unexpected event"), } - - assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[0], 1); - - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } #[test] - fn test_simple_monitor_permanent_update_fail() { - // Test that we handle a simple permanent monitor update failure - let mut nodes = create_network(2); - create_announced_chan_between_nodes(&nodes, 0, 1); - - let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); - let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + fn test_invalid_channel_announcement() { + //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs + let secp_ctx = Secp256k1::new(); + let nodes = create_network(2); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } - check_added_monitors!(nodes[0], 1); + let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]); - let events_1 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::BroadcastChannelUpdate { .. 
} => {}, - _ => panic!("Unexpected event"), - }; + let a_channel_lock = nodes[0].node.channel_state.lock().unwrap(); + let b_channel_lock = nodes[1].node.channel_state.lock().unwrap(); + let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap(); + let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap(); - // TODO: Once we hit the chain with the failure transaction we should check that we get a - // PaymentFailed event + let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); - assert_eq!(nodes[0].node.list_channels().len(), 0); - } + let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key); + let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key); - fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { - // Test that we can recover from a simple temporary monitor update failure optionally with - // a disconnect in between - let mut nodes = create_network(2); - create_announced_chan_between_nodes(&nodes, 0, 1); + let as_network_key = nodes[0].node.get_our_node_id(); + let bs_network_key = nodes[1].node.get_our_node_id(); - let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); - let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..]; - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); } - check_added_monitors!(nodes[0], 1); + let mut chan_announcement; - let events_1 = nodes[0].node.get_and_clear_pending_events(); - assert!(events_1.is_empty()); - assert_eq!(nodes[0].node.list_channels().len(), 1); + macro_rules! dummy_unsigned_msg { + () => { + msgs::UnsignedChannelAnnouncement { + features: msgs::GlobalFeatures::new(), + chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(), + short_channel_id: as_chan.get_short_channel_id().unwrap(), + node_id_1: if were_node_one { as_network_key } else { bs_network_key }, + node_id_2: if were_node_one { bs_network_key } else { as_network_key }, + bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key }, + bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key }, + excess_data: Vec::new(), + }; + } + } - if disconnect { - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + macro_rules! 
sign_msg { + ($unsigned_msg: expr) => { + let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap(); + let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key); + let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key); + let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key); + let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key); + chan_announcement = msgs::ChannelAnnouncement { + node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig}, + node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig}, + bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig }, + bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig }, + contents: $unsigned_msg + } + } } - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + let unsigned_msg = dummy_unsigned_msg!(); + sign_msg!(unsigned_msg); + assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true); + let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); - let mut events_2 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - let payment_event = SendEvent::from_event(events_2.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // Configured with Network::Testnet + let mut unsigned_msg = dummy_unsigned_msg!(); + unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash(); + sign_msg!(unsigned_msg); + assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err()); - expect_pending_htlcs_forwardable!(nodes[1]); + let mut unsigned_msg = dummy_unsigned_msg!(); + unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]); + sign_msg!(unsigned_msg); + assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err()); + } - let events_3 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - match events_3[0] { - Event::PaymentReceived { ref payment_hash, amt } => { - assert_eq!(payment_hash_1, *payment_hash); - assert_eq!(amt, 1000000); - }, - _ => panic!("Unexpected event"), + struct VecWriter(Vec); + impl Writer for VecWriter { + fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> { + self.0.extend_from_slice(buf); + Ok(()) + } + fn size_hint(&mut self, size: usize) { + self.0.reserve_exact(size); } + } - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); + #[test] + fn test_no_txn_manager_serialize_deserialize() { + let mut nodes = create_network(2); + + let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001); + + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - // Now set it to failed again... 
- let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); } - check_added_monitors!(nodes[0], 1); + let nodes_0_serialized = nodes[0].node.encode(); + let mut chan_0_monitor_serialized = VecWriter(Vec::new()); + nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); + + nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()))); + let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; + let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap(); + assert!(chan_0_monitor_read.is_empty()); + + let mut nodes_0_read = &nodes_0_serialized[..]; + let config = UserConfig::new(); + let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()))); + let (_, nodes_0_deserialized) = { + let mut channel_monitors = HashMap::new(); + channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor); + <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + default_config: config, + keys_manager, + fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }), + monitor: nodes[0].chan_monitor.clone(), + chain_monitor: nodes[0].chain_monitor.clone(), + tx_broadcaster: nodes[0].tx_broadcaster.clone(), + logger: Arc::new(test_utils::TestLogger::new()), + channel_monitors: &channel_monitors, + }).unwrap() + }; + assert!(nodes_0_read.is_empty()); - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert!(events_4.is_empty()); + assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok()); + nodes[0].node = Arc::new(nodes_0_deserialized); + let nodes_0_as_listener: Arc = nodes[0].node.clone(); + nodes[0].chain_monitor.register_listener(Arc::downgrade(&nodes_0_as_listener)); assert_eq!(nodes[0].node.list_channels().len(), 1); + check_added_monitors!(nodes[0], 1); - if disconnect { - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - } + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); - // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + 
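+ // Neither side should have anything to retransmit: the funding transaction was never confirmed
+ // before nodes[0] was serialized, so no funding_locked has been exchanged and the reestablish
+ // completes silently. The funding flow resumes from the confirmed transaction below.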
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - let events_5 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_5.len(), 1); - match events_5[0] { - Event::BroadcastChannelUpdate { .. } => {}, - _ => panic!("Unexpected event"), + let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked); + for node in nodes.iter() { + assert!(node.router.handle_channel_announcement(&announcement).unwrap()); + node.router.handle_channel_update(&as_update).unwrap(); + node.router.handle_channel_update(&bs_update).unwrap(); } - // TODO: Once we hit the chain with the failure transaction we should check that we get a - // PaymentFailed event - - assert_eq!(nodes[0].node.list_channels().len(), 0); + send_payment(&nodes[0], &[&nodes[1]], 1000000); } #[test] - fn test_simple_monitor_temporary_update_fail() { - do_test_simple_monitor_temporary_update_fail(false); - do_test_simple_monitor_temporary_update_fail(true); - } - - fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { - let disconnect_flags = 8 | 16; - - // Test that we can recover from a temporary monitor update failure with some in-flight - // HTLCs going on at the same time potentially with some disconnection thrown in. - // * First we route a payment, then get a temporary monitor update failure when trying to - // route a second payment. We then claim the first payment. - // * If disconnect_count is set, we will disconnect at this point (which is likely as - // TemporaryFailure likely indicates net disconnect which resulted in failing to update - // the ChannelMonitor on a watchtower). - // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment - // immediately, otherwise we wait sconnect and deliver them via the reconnect - // channel_reestablish processing (ie disconnect_count & 16 makes no sense if - // disconnect_count & !disconnect_flags is 0). - // * We then update the channel monitor, reconnecting if disconnect_count is set and walk - // through message sending, potentially disconnect/reconnecting multiple times based on - // disconnect_count, to get the update_fulfill_htlc through. - // * We then walk through more message exchanges to get the original update_add_htlc - // through, swapping message ordering based on disconnect_count & 8 and optionally - // disconnect/reconnecting based on disconnect_count. 
+ fn test_simple_manager_serialize_deserialize() { let mut nodes = create_network(2); create_announced_chan_between_nodes(&nodes, 0, 1); - let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - // Now try to send a second payment which will fail to send - let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); - let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); } + let nodes_0_serialized = nodes[0].node.encode(); + let mut chan_0_monitor_serialized = VecWriter(Vec::new()); + nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); + + nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()))); + let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; + let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap(); + assert!(chan_0_monitor_read.is_empty()); + + let mut nodes_0_read = &nodes_0_serialized[..]; + let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()))); + let (_, nodes_0_deserialized) = { + let mut channel_monitors = HashMap::new(); + channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor); + <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + default_config: UserConfig::new(), + keys_manager, + fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }), + monitor: nodes[0].chan_monitor.clone(), + chain_monitor: nodes[0].chain_monitor.clone(), + tx_broadcaster: nodes[0].tx_broadcaster.clone(), + logger: Arc::new(test_utils::TestLogger::new()), + channel_monitors: &channel_monitors, + }).unwrap() + }; + assert!(nodes_0_read.is_empty()); + + assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok()); + nodes[0].node = Arc::new(nodes_0_deserialized); check_added_monitors!(nodes[0], 1); - let events_1 = nodes[0].node.get_and_clear_pending_events(); - assert!(events_1.is_empty()); - assert_eq!(nodes[0].node.list_channels().len(), 1); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); - // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] - // but nodes[0] won't respond since it is frozen. 
- assert!(nodes[1].node.claim_funds(payment_preimage_1)); - check_added_monitors!(nodes[1], 1); - let events_2 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); + fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash); + claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage); + } - if (disconnect_count & 16) == 0 { - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); - let events_3 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(*payment_preimage, payment_preimage_1); - }, - _ => panic!("Unexpected event"), - } + #[test] + fn test_manager_serialize_deserialize_inconsistent_monitor() { + // Test deserializing a ChannelManager with a out-of-date ChannelMonitor + let mut nodes = create_network(4); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 2, 0); + let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3); - if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) { - assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); - } else { panic!(); } - } + let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000); - (update_fulfill_htlcs[0].clone(), commitment_signed.clone()) - }, - _ => panic!("Unexpected event"), - }; + // Serialize the ChannelManager here, but the monitor we keep up-to-date + let nodes_0_serialized = nodes[0].node.encode(); - if disconnect_count & !disconnect_flags > 0 { - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + route_payment(&nodes[0], &[&nodes[3]], 1000000); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/ + // nodes[3]) + let mut node_0_monitors_serialized = Vec::new(); + for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() { + let mut writer = VecWriter(Vec::new()); + monitor.1.write_for_disk(&mut writer).unwrap(); + node_0_monitors_serialized.push(writer.0); + } + + nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()))); + let mut node_0_monitors = Vec::new(); + for serialized in node_0_monitors_serialized.iter() { + let mut read = &serialized[..]; + let (_, monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut read, 
Arc::new(test_utils::TestLogger::new())).unwrap(); + assert!(read.is_empty()); + node_0_monitors.push(monitor); + } + + let mut nodes_0_read = &nodes_0_serialized[..]; + let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()))); + let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + default_config: UserConfig::new(), + keys_manager, + fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }), + monitor: nodes[0].chan_monitor.clone(), + chain_monitor: nodes[0].chain_monitor.clone(), + tx_broadcaster: nodes[0].tx_broadcaster.clone(), + logger: Arc::new(test_utils::TestLogger::new()), + channel_monitors: &node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().unwrap(), monitor) }).collect(), + }).unwrap(); + assert!(nodes_0_read.is_empty()); + + { // Channel close should result in a commitment tx and an HTLC tx + let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(txn.len(), 2); + assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid()); + assert_eq!(txn[1].input[0].previous_output.txid, txn[0].txid()); + } + + for monitor in node_0_monitors.drain(..) { + assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok()); + check_added_monitors!(nodes[0], 1); } + nodes[0].node = Arc::new(nodes_0_deserialized); - // Now fix monitor updating... - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - nodes[0].node.test_restore_channel_monitor(); - check_added_monitors!(nodes[0], 1); + // nodes[1] and nodes[2] have no lost state with nodes[0]... + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + //... and we can even still claim the payment! + claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage); - macro_rules! disconnect_reconnect_peers { () => { { - nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id()); + let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id()); + if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) { + assert_eq!(msg.channel_id, channel_id); + } else { panic!("Unexpected result"); } + } - let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); - assert_eq!(reestablish_1.len(), 1); - let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - assert_eq!(reestablish_2.len(), 1); + macro_rules! 
check_spendable_outputs { + ($node: expr, $der_idx: expr) => { + { + let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events(); + let mut txn = Vec::new(); + for event in events { + match event { + Event::SpendableOutputs { ref outputs } => { + for outp in outputs { + match *outp { + SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => { + let input = TxIn { + previous_output: outpoint.clone(), + script_sig: Script::new(), + sequence: 0, + witness: Vec::new(), + }; + let outp = TxOut { + script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(), + value: output.value, + }; + let mut spend_tx = Transaction { + version: 2, + lock_time: 0, + input: vec![input], + output: vec![outp], + }; + let secp_ctx = Secp256k1::new(); + let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key); + let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey(); + let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap(); + let remotesig = secp_ctx.sign(&sighash, key); + spend_tx.input[0].witness.push(remotesig.serialize_der(&secp_ctx).to_vec()); + spend_tx.input[0].witness[0].push(SigHashType::All as u8); + spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec()); + txn.push(spend_tx); + }, + SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => { + let input = TxIn { + previous_output: outpoint.clone(), + script_sig: Script::new(), + sequence: *to_self_delay as u32, + witness: Vec::new(), + }; + let outp = TxOut { + script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(), + value: output.value, + }; + let mut spend_tx = Transaction { + version: 2, + lock_time: 0, + input: vec![input], + output: vec![outp], + }; + let secp_ctx = Secp256k1::new(); + let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], witness_script, output.value)[..]).unwrap(); + let local_delaysig = secp_ctx.sign(&sighash, key); + spend_tx.input[0].witness.push(local_delaysig.serialize_der(&secp_ctx).to_vec()); + spend_tx.input[0].witness[0].push(SigHashType::All as u8); + spend_tx.input[0].witness.push(vec!(0)); + spend_tx.input[0].witness.push(witness_script.clone().into_bytes()); + txn.push(spend_tx); + }, + SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { + let secp_ctx = Secp256k1::new(); + let input = TxIn { + previous_output: outpoint.clone(), + script_sig: Script::new(), + sequence: 0, + witness: Vec::new(), + }; + let outp = TxOut { + script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(), + value: output.value, + }; + let mut spend_tx = Transaction { + version: 2, + lock_time: 0, + input: vec![input], + output: vec![outp.clone()], + }; + let secret = { + match ExtendedPrivKey::new_master(&secp_ctx, Network::Testnet, &$node.node_seed) { + Ok(master_key) => { + match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) { + Ok(key) => key, + Err(_) => panic!("Your RNG is busted"), + } + } + Err(_) => panic!("Your rng is busted"), + } + }; + let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key; + let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey(); + let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], 
&witness_script, output.value)[..]).unwrap(); + let sig = secp_ctx.sign(&sighash, &secret.secret_key); + spend_tx.input[0].witness.push(sig.serialize_der(&secp_ctx).to_vec()); + spend_tx.input[0].witness[0].push(SigHashType::All as u8); + spend_tx.input[0].witness.push(pubkey.serialize().to_vec()); + txn.push(spend_tx); + }, + } + } + }, + _ => panic!("Unexpected event"), + }; + } + txn + } + } + } - let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); - let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + #[test] + fn test_claim_sizeable_push_msat() { + // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx + let nodes = create_network(2); - assert!(as_resp.0.is_none()); - assert!(bs_resp.0.is_none()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000); + nodes[1].node.force_close_channel(&chan.2); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); + check_spends!(node_txn[0], chan.3.clone()); + assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening - (reestablish_1, reestablish_2, as_resp, bs_resp) - } } } + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0); + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 1); + check_spends!(spend_txn[0], node_txn[0].clone()); + } - let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 { - let events_4 = nodes[0].node.get_and_clear_pending_events(); - assert!(events_4.is_empty()); + #[test] + fn test_claim_on_remote_sizeable_push_msat() { + // Same test as previous, just test on remote commitment tx, as per_commitment_point registration changes following you're funder/fundee and + // to_remote output is encumbered by a P2WPKH - let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); - assert_eq!(reestablish_1.len(), 1); - let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - assert_eq!(reestablish_2.len(), 1); + let nodes = create_network(2); - let mut as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); - check_added_monitors!(nodes[0], 0); - let mut bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); - check_added_monitors!(nodes[1], 0); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000); + nodes[0].node.force_close_channel(&chan.2); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); + check_spends!(node_txn[0], chan.3.clone()); + assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening - assert!(as_resp.0.is_none()); - assert!(bs_resp.0.is_none()); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn[0], spend_txn[1]); + check_spends!(spend_txn[0], node_txn[0].clone()); + } - assert!(bs_resp.1.is_none()); - if (disconnect_count & 16) == 0 { - assert!(bs_resp.2.is_none()); + #[test] + fn test_claim_on_remote_revoked_sizeable_push_msat() { + // Same test as previous, just test on remote revoked commitment tx, as per_commitment_point registration changes following you're funder/fundee and + // to_remote output is encumbered by a P2WPKH - assert!(as_resp.1.is_some()); - assert!(as_resp.2.is_some()); - assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst); - } else { - assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty()); - assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); - assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); - assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); - assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]); - assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); + let nodes = create_network(2); - assert!(as_resp.1.is_none()); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000); + let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid()); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap(); - let events_3 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_3.len(), 1); - match events_3[0] { - Event::PaymentSent { ref payment_preimage } => { - assert_eq!(*payment_preimage, payment_preimage_1); - }, - _ => panic!("Unexpected event"), - } + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 4); + assert_eq!(spend_txn[0], spend_txn[2]); // to_remote output on revoked remote commitment_tx + check_spends!(spend_txn[0], revoked_local_txn[0].clone()); + assert_eq!(spend_txn[1], spend_txn[3]); // to_local output on local commitment tx + check_spends!(spend_txn[1], node_txn[0].clone()); + } - let (as_resp_raa, as_resp_cu) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap(); - assert!(as_resp_cu.is_none()); - check_added_monitors!(nodes[0], 1); + #[test] + fn test_static_spendable_outputs_preimage_tx() { + let nodes = create_network(2); - as_resp.1 = Some(as_resp_raa); - bs_resp.2 = None; - } + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - if disconnect_count & !disconnect_flags > 1 { - let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!(); + let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - if (disconnect_count & 16) == 0 { - assert!(reestablish_1 == second_reestablish_1); - assert!(reestablish_2 == second_reestablish_2); - } - assert!(as_resp == second_as_resp); - assert!(bs_resp == second_bs_resp); - } + let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(commitment_tx[0].input.len(), 1); + assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid()); - (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap()) - } else { - let mut events_4 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_4.len(), 2); - (SendEvent::from_event(events_4.remove(0)), match events_4[0] { - Event::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - msg.clone() - }, - _ => panic!("Unexpected event"), - }) - }; + // Settle A's commitment tx on B's chain + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + assert!(nodes[1].node.claim_funds(payment_preimage)); + check_added_monitors!(nodes[1], 1); + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::UpdateHTLCs { .. } => {}, + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexepected event"), + } - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 1 (local commitment tx), ChannelMonitor: 2 (1 preimage tx) * 2 (block-rescan) + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0], node_txn[2]); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[1], chan_1.3.clone()); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); - assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting an RAA from nodes[0] still - check_added_monitors!(nodes[1], 1); + let spend_txn = check_spendable_outputs!(nodes[1], 1); // , 0, 0, 1, 1); + assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn[0], spend_txn[1]); + check_spends!(spend_txn[0], node_txn[0].clone()); + } - if disconnect_count & !disconnect_flags > 2 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + #[test] + fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { + let nodes = create_network(2); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - assert!(as_resp.2.is_none()); - assert!(bs_resp.2.is_none()); + let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid()); + + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), } + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn.pop().unwrap(), node_txn[0]); + assert_eq!(node_txn[0].input.len(), 2); + check_spends!(node_txn[0], revoked_local_txn[0].clone()); - let as_commitment_update; - let bs_second_commitment_update; + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn[0], spend_txn[1]); + check_spends!(spend_txn[0], node_txn[0].clone()); + } + + #[test] + fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { + let nodes = create_network(2); + + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - macro_rules! 
handle_bs_raa { () => { - as_commitment_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); - assert!(as_commitment_update.update_add_htlcs.is_empty()); - assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(as_commitment_update.update_fail_htlcs.is_empty()); - assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); - } } + let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid()); - macro_rules! handle_initial_raa { () => { - bs_second_commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap().unwrap(); - assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); - } } + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); - if (disconnect_count & 8) == 0 { - handle_bs_raa!(); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + // A will generate HTLC-Timeout from revoked commitment tx + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(revoked_htlc_txn.len(), 3); + assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]); + assert_eq!(revoked_htlc_txn[0].input.len(), 1); + assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone()); + check_spends!(revoked_htlc_txn[1], chan_1.3.clone()); + + // B will generate justice tx from A's revoked commitment/HTLC tx + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + } - if disconnect_count & !disconnect_flags > 3 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[3].input.len(), 1); + check_spends!(node_txn[3], revoked_htlc_txn[0].clone()); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); - assert!(bs_resp.1.is_none()); + // Check B's ChannelMonitor was able to generate the right spendable output descriptor + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 3); + assert_eq!(spend_txn[0], spend_txn[1]); + check_spends!(spend_txn[0], node_txn[0].clone()); + check_spends!(spend_txn[2], node_txn[3].clone()); + } - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.is_none()); + #[test] + fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { + let nodes = create_network(2); - assert!(as_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); - } + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - handle_initial_raa!(); + let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid()); - if disconnect_count & !disconnect_flags > 4 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); - assert!(as_resp.1.is_none()); - assert!(bs_resp.1.is_none()); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + // B will generate HTLC-Success from revoked commitment tx + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); - } - } else { - handle_initial_raa!(); + assert_eq!(revoked_htlc_txn.len(), 3); + assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]); + assert_eq!(revoked_htlc_txn[0].input.len(), 1); + assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone()); - if disconnect_count & !disconnect_flags > 3 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + // A will generate justice tx from B's revoked commitment/HTLC tx + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + } - assert!(as_resp.1.is_none()); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[3].input.len(), 1); + check_spends!(node_txn[3], revoked_htlc_txn[0].clone()); - assert!(as_resp.2.is_none()); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + // Check A's ChannelMonitor was able to generate the right spendable output descriptor + let spend_txn = check_spendable_outputs!(nodes[0], 1); + assert_eq!(spend_txn.len(), 5); + assert_eq!(spend_txn[0], spend_txn[2]); + assert_eq!(spend_txn[1], spend_txn[3]); + check_spends!(spend_txn[0], revoked_local_txn[0].clone()); // spending to_remote output from revoked local tx + check_spends!(spend_txn[1], node_txn[2].clone()); // spending justice tx output from revoked local tx htlc received output + check_spends!(spend_txn[4], node_txn[3].clone()); // spending justice tx output on htlc success tx + } - assert!(bs_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); - } + #[test] + fn test_onchain_to_onchain_claim() { + // Test that in case of channel closure, we detect the state of output thanks to + // ChainWatchInterface and claim HTLC on downstream peer's remote commitment tx. + // First, have C claim an HTLC against its own latest commitment transaction. + // Then, broadcast these to B, which should update the monitor downstream on the A<->B + // channel. + // Finally, check that B will claim the HTLC output if A's latest commitment transaction + // gets broadcast. - handle_bs_raa!(); + let nodes = create_network(3); - if disconnect_count & !disconnect_flags > 4 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - assert!(as_resp.1.is_none()); - assert!(bs_resp.1.is_none()); + // Rebalance the network a bit by relaying one payment through all the channels ... + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); - } + let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_2.3.clone()); + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), } - let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap(); - assert!(as_commitment_signed.is_none()); - check_added_monitors!(nodes[0], 1); + let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx) + assert_eq!(c_txn.len(), 3); + assert_eq!(c_txn[0], c_txn[2]); + assert_eq!(commitment_tx[0], c_txn[1]); + check_spends!(c_txn[1], chan_2.3.clone()); + check_spends!(c_txn[2], c_txn[1].clone()); + assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_eq!(c_txn[0].lock_time, 0); // Success tx - let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap(); - assert!(bs_third_commitment_signed.is_none()); + // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}, 1); + { + let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(b_txn.len(), 4); + assert_eq!(b_txn[0], b_txn[3]); + check_spends!(b_txn[1], chan_2.3); // B local commitment tx, issued by ChannelManager + check_spends!(b_txn[2], b_txn[1].clone()); // HTLC-Timeout on B local commitment tx, issued by ChannelManager + assert_eq!(b_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(b_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_ne!(b_txn[2].lock_time, 0); // Timeout tx + check_spends!(b_txn[0], c_txn[1].clone()); // timeout tx on C remote commitment tx, issued by ChannelMonitor, * 2 due to block rescan + assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert_ne!(b_txn[2].lock_time, 0); // Timeout tx + b_txn.clear(); + } + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); check_added_monitors!(nodes[1], 1); + match msg_events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + match msg_events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. 
} } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + }, + _ => panic!("Unexpected event"), + }; + // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx + let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(b_txn.len(), 3); + check_spends!(b_txn[1], chan_1.3); // Local commitment tx, issued by ChannelManager + assert_eq!(b_txn[0], b_txn[2]); // HTLC-Success tx, issued by ChannelMonitor, * 2 due to block rescan + check_spends!(b_txn[0], commitment_tx[0].clone()); + assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert_eq!(b_txn[2].lock_time, 0); // Success tx + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + match msg_events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + } - assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[1], 1); + #[test] + fn test_duplicate_payment_hash_one_failure_one_success() { + // Topology : A --> B --> C + // We route 2 payments with same hash between B and C, one will be timeout, the other successfully claim + let mut nodes = create_network(3); - assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none()); - check_added_monitors!(nodes[0], 1); + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - expect_pending_htlcs_forwardable!(nodes[1]); + let (our_payment_preimage, duplicate_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000); + *nodes[0].network_payment_count.borrow_mut() -= 1; + assert_eq!(route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000).1, duplicate_payment_hash); - let events_5 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_5.len(), 1); - match events_5[0] { - Event::PaymentReceived { ref payment_hash, amt } => { - assert_eq!(payment_hash_2, *payment_hash); - assert_eq!(amt, 1000000); - }, + let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(commitment_txn[0].input.len(), 1); + check_spends!(commitment_txn[0], chan_2.3.clone()); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1); + let htlc_timeout_tx; + { // Extract one of the two HTLC-Timeout transaction + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 7); + assert_eq!(node_txn[0], node_txn[5]); + assert_eq!(node_txn[1], node_txn[6]); + check_spends!(node_txn[0], commitment_txn[0].clone()); + assert_eq!(node_txn[0].input.len(), 
1);
+			check_spends!(node_txn[1], commitment_txn[0].clone());
+			assert_eq!(node_txn[1].input.len(), 1);
+			assert_ne!(node_txn[0].input[0], node_txn[1].input[0]);
+			check_spends!(node_txn[2], chan_2.3.clone());
+			check_spends!(node_txn[3], node_txn[2].clone());
+			check_spends!(node_txn[4], node_txn[2].clone());
+			htlc_timeout_tx = node_txn[1].clone();
+		}
+
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+
+		nodes[2].node.claim_funds(our_payment_preimage);
+		nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
+		check_added_monitors!(nodes[2], 2);
+		let events = nodes[2].node.get_and_clear_pending_msg_events();
+		match events[0] {
+			MessageSendEvent::UpdateHTLCs { .. } => {},
 			_ => panic!("Unexpected event"),
 		}
+		match events[1] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+		assert_eq!(htlc_success_txn.len(), 5);
+		check_spends!(htlc_success_txn[2], chan_2.3.clone());
+		assert_eq!(htlc_success_txn[0], htlc_success_txn[3]);
+		assert_eq!(htlc_success_txn[0].input.len(), 1);
+		assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		assert_eq!(htlc_success_txn[1], htlc_success_txn[4]);
+		assert_eq!(htlc_success_txn[1].input.len(), 1);
+		assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		assert_ne!(htlc_success_txn[0].input[0], htlc_success_txn[1].input[0]);
+		check_spends!(htlc_success_txn[0], commitment_txn[0].clone());
+		check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
+
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
+		let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(htlc_updates.update_add_htlcs.is_empty());
+		assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+		assert_eq!(htlc_updates.update_fail_htlcs[0].htlc_id, 1);
+		assert!(htlc_updates.update_fulfill_htlcs.is_empty());
+		assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
+		check_added_monitors!(nodes[1], 1);
-		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
-	}
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]).unwrap();
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+		{
+			commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
+			let events = nodes[0].node.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 1);
+			match events[0] {
+				MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelClosed { .. } } => {
+				},
+				_ => { panic!("Unexpected event"); }
+			}
+		}
+		let events = nodes[0].node.get_and_clear_pending_events();
+		match events[0] {
+			Event::PaymentFailed { ref payment_hash, ..
} => { + assert_eq!(*payment_hash, duplicate_payment_hash); + } + _ => panic!("Unexpected event"), + } - #[test] - fn test_monitor_temporary_update_fail_a() { - do_test_monitor_temporary_update_fail(0); - do_test_monitor_temporary_update_fail(1); - do_test_monitor_temporary_update_fail(2); - do_test_monitor_temporary_update_fail(3); - do_test_monitor_temporary_update_fail(4); - do_test_monitor_temporary_update_fail(5); - } + // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert_eq!(updates.update_fulfill_htlcs[0].htlc_id, 0); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + check_added_monitors!(nodes[1], 1); - #[test] - fn test_monitor_temporary_update_fail_b() { - do_test_monitor_temporary_update_fail(2 | 8); - do_test_monitor_temporary_update_fail(3 | 8); - do_test_monitor_temporary_update_fail(4 | 8); - do_test_monitor_temporary_update_fail(5 | 8); - } + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - #[test] - fn test_monitor_temporary_update_fail_c() { - do_test_monitor_temporary_update_fail(1 | 16); - do_test_monitor_temporary_update_fail(2 | 16); - do_test_monitor_temporary_update_fail(3 | 16); - do_test_monitor_temporary_update_fail(2 | 8 | 16); - do_test_monitor_temporary_update_fail(3 | 8 | 16); + let events = nodes[0].node.get_and_clear_pending_events(); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, our_payment_preimage); + } + _ => panic!("Unexpected event"), + } } #[test] - fn test_invalid_channel_announcement() { - //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs - let secp_ctx = Secp256k1::new(); + fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let nodes = create_network(2); - let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let a_channel_lock = nodes[0].node.channel_state.lock().unwrap(); - let b_channel_lock = nodes[1].node.channel_state.lock().unwrap(); - let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap(); - let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap(); + let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0; + let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(local_txn[0].input.len(), 1); + check_spends!(local_txn[0], chan_1.3.clone()); - let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); + // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx + nodes[1].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[1], 1); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, 
bits: 42, nonce: 42 };
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		match events[0] {
+			MessageSendEvent::UpdateHTLCs { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		match events[1] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn[0].input.len(), 1);
+		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		check_spends!(node_txn[0], local_txn[0].clone());
-		let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
-		let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
+		// Verify that B is able to spend its own HTLC-Success tx thanks to the spendable output event given back by its ChannelMonitor
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 2);
+		check_spends!(spend_txn[0], node_txn[0].clone());
+		check_spends!(spend_txn[1], node_txn[2].clone());
+	}
-		let as_network_key = nodes[0].node.get_our_node_id();
-		let bs_network_key = nodes[1].node.get_our_node_id();
+	#[test]
+	fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
+		let nodes = create_network(2);
-		let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
+		// Create some initial channels
+		let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
-		let mut chan_announcement;
+		route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
+		let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+		assert_eq!(local_txn[0].input.len(), 1);
+		check_spends!(local_txn[0], chan_1.3.clone());
-		macro_rules! dummy_unsigned_msg {
-			() => {
-				msgs::UnsignedChannelAnnouncement {
-					features: msgs::GlobalFeatures::new(),
-					chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
-					short_channel_id: as_chan.get_short_channel_id().unwrap(),
-					node_id_1: if were_node_one { as_network_key } else { bs_network_key },
-					node_id_2: if were_node_one { bs_network_key } else { as_network_key },
-					bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
-					bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
-					excess_data: Vec::new(),
-				};
-			}
+		// Timeout the HTLC on A's chain so that it can generate a HTLC-Timeout tx
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
} => {}, + _ => panic!("Unexepected event"), } + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[0], local_txn[0].clone()); + + // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor + let spend_txn = check_spendable_outputs!(nodes[0], 1); + assert_eq!(spend_txn.len(), 8); + assert_eq!(spend_txn[0], spend_txn[2]); + assert_eq!(spend_txn[0], spend_txn[4]); + assert_eq!(spend_txn[0], spend_txn[6]); + assert_eq!(spend_txn[1], spend_txn[3]); + assert_eq!(spend_txn[1], spend_txn[5]); + assert_eq!(spend_txn[1], spend_txn[7]); + check_spends!(spend_txn[0], local_txn[0].clone()); + check_spends!(spend_txn[1], node_txn[0].clone()); + } - macro_rules! sign_msg { - ($unsigned_msg: expr) => { - let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap(); - let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key); - let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key); - let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key); - let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key); - chan_announcement = msgs::ChannelAnnouncement { - node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig}, - node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig}, - bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig }, - bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig }, - contents: $unsigned_msg - } - } - } + #[test] + fn test_static_output_closing_tx() { + let nodes = create_network(2); - let unsigned_msg = dummy_unsigned_msg!(); - sign_msg!(unsigned_msg); - assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true); - let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - // Configured with Network::Testnet - let mut unsigned_msg = dummy_unsigned_msg!(); - unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash(); - sign_msg!(unsigned_msg); - assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err()); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; - let mut unsigned_msg = dummy_unsigned_msg!(); - unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]); - sign_msg!(unsigned_msg); - assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err()); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1); + let spend_txn = check_spendable_outputs!(nodes[0], 2); + assert_eq!(spend_txn.len(), 1); + check_spends!(spend_txn[0], closing_tx.clone()); + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1); + let spend_txn = check_spendable_outputs!(nodes[1], 2); + assert_eq!(spend_txn.len(), 1); + 
check_spends!(spend_txn[0], closing_tx); } }
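The check_spendable_outputs! macro added above handles all three SpendableOutputDescriptor variants inline, which makes the per-variant signing steps easy to miss. Purely as an illustration (this helper is not part of the patch; it assumes it sits inside the same tests module so the file's existing imports and the Testnet convention above apply, and it leaves fee deduction and the choice of destination script to the caller), the DynamicOutputP2WPKH case looks like this on its own:

// Build a transaction sweeping one DynamicOutputP2WPKH descriptor from an
// Event::SpendableOutputs to `destination`. Mirrors the corresponding arm of
// check_spendable_outputs! above; returns None for the other variants.
fn spend_p2wpkh_descriptor(descriptor: &SpendableOutputDescriptor, destination: Script) -> Option<Transaction> {
	if let SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } = *descriptor {
		let secp_ctx = Secp256k1::new();
		let mut spend_tx = Transaction {
			version: 2,
			lock_time: 0,
			input: vec![TxIn {
				previous_output: outpoint.clone(),
				script_sig: Script::new(),
				sequence: 0,
				witness: Vec::new(),
			}],
			// No fee is subtracted here; a real sweep would pick value and destination sensibly.
			output: vec![TxOut { script_pubkey: destination, value: output.value }],
		};
		// The BIP143 sighash commits to the P2PKH-style script of the remote pubkey,
		// exactly as in the macro above.
		let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
		let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey();
		let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
		let sig = secp_ctx.sign(&sighash, key);
		// Witness stack for P2WPKH: <sig || SIGHASH_ALL> <pubkey>
		spend_tx.input[0].witness.push(sig.serialize_der(&secp_ctx).to_vec());
		spend_tx.input[0].witness[0].push(SigHashType::All as u8);
		spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
		Some(spend_tx)
	} else {
		None
	}
}

The DynamicOutputP2WSH and StaticOutput arms of the macro follow the same transaction-building shape; they differ in which script the BIP143 sighash commits to, in the witness stack that is pushed, and (for StaticOutput in these tests) in deriving the signing key from the node seed.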
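Separately, nearly every test above "mines" a block with the same boilerplate: build a dummy BlockHeader, wrap the relevant transactions in a Block, and call block_connected_with_filtering. A small helper in the same spirit (again hypothetical and not part of the patch; it assumes the tests-module Node struct and a u32 height argument, as used in the calls above) captures the pattern:

// "Mine" a block containing `txn` as seen by `node`'s chain watcher. The header fields are
// dummies, as in the tests above; only the txdata and the height matter to the code under test.
fn connect_block_with_txn(node: &Node, txn: Vec<Transaction>, height: u32) {
	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
	node.chain_monitor.block_connected_with_filtering(&Block { header, txdata: txn }, height);
}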