X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=d6ba67c2b36dcf24d23e3c880ccf64ddcb5a7b19;hb=497643a65b67ee0ecc7d09a94459c0f42cc5ac99;hp=748b71dcf64d576541c6b56c678c891b209e0c82;hpb=d6726d6f26fd799e4bac7a206fd7e62f3a779b75;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 748b71dc..d6ba67c2 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -1,3 +1,13 @@
+//! The top-level channel management and payment tracking logic lives here.
+//!
+//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
+//! responsible for tracking which channels are open, which HTLCs are in flight, and for
+//! reestablishing those channels upon reconnect to the relevant peer(s).
+//!
+//! It does not manage routing logic (see ln::router for that) nor does it manage constructing
+//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
+//! imply it needs to fail HTLCs/payments/channels it manages).
+
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
@@ -12,13 +22,14 @@ use secp256k1;
 
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
-use ln::channel::{Channel, ChannelKeys};
-use ln::channelmonitor::ManyChannelMonitor;
+use ln::channel::{Channel, ChannelError, ChannelKeys};
+use ln::channelmonitor::{ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
-use ln::msgs::{HandleError,ChannelMessageHandler,MsgEncodable,MsgDecodable};
+use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder};
 use util::{byte_utils, events, internal_traits, rng};
 use util::sha2::Sha256;
+use util::ser::{Readable, Writeable};
 use util::chacha20poly1305rfc::ChaCha20;
 use util::logger::Logger;
 use util::errors::APIError;
@@ -32,20 +43,36 @@ use crypto::symmetriccipher::SynchronousStreamCipher;
 
 use std::{ptr, mem};
 use std::collections::HashMap;
 use std::collections::hash_map;
+use std::io::Cursor;
 use std::sync::{Mutex,MutexGuard,Arc};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::{Instant,Duration};
 
+/// We hold various information about HTLC relay in the HTLC objects in Channel itself:
+///
+/// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
+/// forward the HTLC with information it will give back to us when it does so, or if it should Fail
+/// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
+///
+/// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
+/// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
+/// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
+/// the HTLC backwards along the relevant path).
+/// Alternatively, we can fill an outbound HTLC with an HTLCSource::OutboundRoute indicating this is
+/// our payment, which we can use to decode errors or inform the user that the payment was sent.
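+///
+/// As a rough, illustrative sketch of the above (the type names are the real ones from this
+/// module; the surrounding glue is hypothetical):
+///
+///   // inbound: an onion-decoded HTLC becomes a PendingHTLCStatus
+///   //   PendingHTLCStatus::Forward(PendingForwardHTLCInfo { .. }) => relay it onwards
+///   //   PendingHTLCStatus::Fail(HTLCFailureMsg { .. })           => error it back
+///   // outbound: every HTLC we send carries an HTLCSource
+///   //   HTLCSource::PreviousHopData(..)  => relayed; lets us fulfill/fail backwards
+///   //   HTLCSource::OutboundRoute { .. } => our own payment; lets us decode errors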
 mod channel_held_info {
 	use ln::msgs;
+	use ln::router::Route;
+	use secp256k1::key::SecretKey;
+	use secp256k1::ecdh::SharedSecret;
 
 	/// Stores the info we will need to send when we want to forward an HTLC onwards
 	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 	pub struct PendingForwardHTLCInfo {
 		pub(super) onion_packet: Option<msgs::OnionPacket>,
+		pub(super) incoming_shared_secret: SharedSecret,
 		pub(super) payment_hash: [u8; 32],
 		pub(super) short_channel_id: u64,
-		pub(super) prev_short_channel_id: u64,
 		pub(super) amt_to_forward: u64,
 		pub(super) outgoing_cltv_value: u32,
 	}
@@ -63,22 +90,39 @@ mod channel_held_info {
 		Fail(HTLCFailureMsg),
 	}
 
-	#[cfg(feature = "fuzztarget")]
-	impl PendingHTLCStatus {
+	/// Tracks the inbound corresponding to an outbound HTLC
+	#[derive(Clone)]
+	pub struct HTLCPreviousHopData {
+		pub(super) short_channel_id: u64,
+		pub(super) htlc_id: u64,
+		pub(super) incoming_packet_shared_secret: SharedSecret,
+	}
+
+	/// Tracks the source of an outbound HTLC: the previous hop that relayed it to us, or our own payment route
+	#[derive(Clone)]
+	pub enum HTLCSource {
+		PreviousHopData(HTLCPreviousHopData),
+		OutboundRoute {
+			route: Route,
+			session_priv: SecretKey,
+			/// Technically we can recalculate this from the route, but we cache it here to avoid
+			/// doing a double-pass on route when we get a failure back
+			first_hop_htlc_msat: u64,
+		},
+	}
+	#[cfg(test)]
+	impl HTLCSource {
 		pub fn dummy() -> Self {
-			PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
-				onion_packet: None,
-				payment_hash: [0; 32],
-				short_channel_id: 0,
-				prev_short_channel_id: 0,
-				amt_to_forward: 0,
-				outgoing_cltv_value: 0,
-			})
+			HTLCSource::OutboundRoute {
+				route: Route { hops: Vec::new() },
+				session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
+				first_hop_htlc_msat: 0,
+			}
 		}
 	}
 
 	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-	pub enum HTLCFailReason {
+	pub(crate) enum HTLCFailReason {
 		ErrorPacket {
 			err: msgs::OnionErrorPacket,
 		},
@@ -87,38 +131,8 @@ mod channel_held_info {
 			data: Vec<u8>,
 		}
 	}
-
-	#[cfg(feature = "fuzztarget")]
-	impl HTLCFailReason {
-		pub fn dummy() -> Self {
-			HTLCFailReason::Reason {
-				failure_code: 0, data: Vec::new(),
-			}
-		}
-	}
-}
-#[cfg(feature = "fuzztarget")]
-pub use self::channel_held_info::*;
-#[cfg(not(feature = "fuzztarget"))]
-pub(crate) use self::channel_held_info::*;
-
-enum PendingOutboundHTLC {
-	IntermediaryHopData {
-		source_short_channel_id: u64,
-		incoming_packet_shared_secret: SharedSecret,
-	},
-	OutboundRoute {
-		route: Route,
-		session_priv: SecretKey,
-	},
-	/// Used for channel rebalancing
-	CycledRoute {
-		source_short_channel_id: u64,
-		incoming_packet_shared_secret: SharedSecret,
-		route: Route,
-		session_priv: SecretKey,
-	}
 }
+pub(super) use self::channel_held_info::*;
 
 struct MsgHandleErrInternal {
 	err: msgs::HandleError,
@@ -163,6 +177,48 @@ impl MsgHandleErrInternal {
 	fn from_no_close(err: msgs::HandleError) -> Self {
 		Self { err, needs_channel_force_close: false }
 	}
+	#[inline]
+	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
+		Self {
+			err: match err {
+				ChannelError::Ignore(msg) => HandleError {
+					err: msg,
+					action: Some(msgs::ErrorAction::IgnoreError),
+				},
+				ChannelError::Close(msg) => HandleError {
+					err: msg,
+					action: Some(msgs::ErrorAction::SendErrorMessage {
+						msg: msgs::ErrorMessage {
+							channel_id,
+							data: msg.to_string()
+						},
+					}),
+				},
+			},
+			needs_channel_force_close: false,
+		}
+	}
+	#[inline]
+	fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
+		Self {
+			err: match err {
+				ChannelError::Ignore(msg) => HandleError {
+					err: msg,
+					action: Some(msgs::ErrorAction::IgnoreError),
+				},
+				ChannelError::Close(msg) => HandleError {
+					err: msg,
+					action: Some(msgs::ErrorAction::SendErrorMessage {
+						msg: msgs::ErrorMessage {
+							channel_id,
+							data: msg.to_string()
+						},
+					}),
+				},
+			},
+			needs_channel_force_close: true,
+		}
+	}
 }
 
 /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
@@ -171,6 +227,12 @@ impl MsgHandleErrInternal {
 /// probably increase this significantly.
 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
 
+struct HTLCForwardInfo {
+	prev_short_channel_id: u64,
+	prev_htlc_id: u64,
+	forward_info: PendingForwardHTLCInfo,
+}
+
 struct ChannelHolder {
 	by_id: HashMap<[u8; 32], Channel>,
 	short_to_id: HashMap<u64, [u8; 32]>,
@@ -179,18 +241,18 @@ struct ChannelHolder {
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about there existing a channel with the short id here, nor the short
 	/// ids in the PendingForwardHTLCInfo!
-	forward_htlcs: HashMap<u64, Vec<PendingForwardHTLCInfo>>,
+	forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the channels given here actually existing anymore by the time you
 	/// go to read them!
-	claimable_htlcs: HashMap<[u8; 32], PendingOutboundHTLC>,
+	claimable_htlcs: HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
 }
 struct MutChannelHolder<'a> {
 	by_id: &'a mut HashMap<[u8; 32], Channel>,
 	short_to_id: &'a mut HashMap<u64, [u8; 32]>,
 	next_forward: &'a mut Instant,
-	forward_htlcs: &'a mut HashMap<u64, Vec<PendingForwardHTLCInfo>>,
-	claimable_htlcs: &'a mut HashMap<[u8; 32], PendingOutboundHTLC>,
+	forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
+	claimable_htlcs: &'a mut HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
 }
 impl ChannelHolder {
 	fn borrow_parts(&mut self) -> MutChannelHolder {
@@ -209,6 +271,7 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+///
 /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
 /// to individual Channels.
 pub struct ChannelManager {
@@ -231,7 +294,27 @@ pub struct ChannelManager {
 	logger: Arc<Logger>,
 }
 
+/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
+/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+/// ie the node we forwarded the payment on to should always have enough room to reliably time out
+/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
 const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
+const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
+// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
+// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
+// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
+#[deny(const_err)]
+#[allow(dead_code)]
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;
+
+// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
+// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
+#[deny(const_err)]
+#[allow(dead_code)]
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 macro_rules! secp_call {
 	( $res: expr, $err: expr ) => {
@@ -252,6 +335,7 @@ struct OnionKeys {
 	mu: [u8; 32],
 }
 
+/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 pub struct ChannelDetails {
 	/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
 	/// thereafter this is the txid of the funding transaction xor the funding transaction output).
@@ -261,17 +345,23 @@ pub struct ChannelDetails {
 	/// The position of the funding transaction in the chain. None if the funding transaction has
 	/// not yet been confirmed and the channel fully opened.
 	pub short_channel_id: Option<u64>,
+	/// The node_id of our counterparty
 	pub remote_network_id: PublicKey,
+	/// The value, in satoshis, of this channel as it appears in the funding output
 	pub channel_value_satoshis: u64,
 	/// The user_id passed in to create_channel, or 0 if the channel was inbound.
 	pub user_id: u64,
 }
 
 impl ChannelManager {
-	/// Constructs a new ChannelManager to hold several channels and route between them. This is
-	/// the main "logic hub" for all channel-related actions, and implements ChannelMessageHandler.
+	/// Constructs a new ChannelManager to hold several channels and route between them.
+	///
+	/// This is the main "logic hub" for all channel-related actions, and implements
+	/// ChannelMessageHandler.
+	///
 	/// fee_proportional_millionths is an optional fee to charge any payments routed through us.
 	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
+	///
 	/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
 	pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
 		let secp_ctx = Secp256k1::new();
@@ -307,12 +397,15 @@ impl ChannelManager {
 	}
 
 	/// Creates a new outbound channel to the given remote node and with the given value.
+	///
 	/// user_id will be provided back as user_channel_id in FundingGenerationReady and
 	/// FundingBroadcastSafe events to allow tracking of which events correspond with which
 	/// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
 	/// may wish to avoid using 0 for user_id here.
+	///
 	/// If successful, will generate a SendOpenChannel event, so you should probably poll
 	/// PeerManager::process_events afterwards.
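+	///
+	/// A minimal usage sketch (values purely illustrative; their_node_id is assumed to be the
+	/// peer's public key obtained elsewhere):
+	///
+	///   // open a 1_000_000-satoshi channel, pushing 1_000 msat, tagged with user_id 42
+	///   channel_manager.create_channel(their_node_id, 1_000_000, 1_000, 42)?;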
+	///
+	/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is greater than channel_value_satoshis * 1k
 	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
 		let chan_keys = if cfg!(feature = "fuzztarget") {
@@ -336,11 +429,17 @@ impl ChannelManager {
 		};
 
 		let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
-		let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator)?;
+		let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
 		let mut channel_state = self.channel_state.lock().unwrap();
-		match channel_state.by_id.insert(channel.channel_id(), channel) {
-			Some(_) => panic!("RNG is bad???"),
-			None => {}
+		match channel_state.by_id.entry(channel.channel_id()) {
+			hash_map::Entry::Occupied(_) => {
+				if cfg!(feature = "fuzztarget") {
+					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
+				} else {
+					panic!("RNG is bad???");
+				}
+			},
+			hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
 		}
 
 		let mut events = self.pending_events.lock().unwrap();
@@ -374,7 +473,10 @@ impl ChannelManager {
 		let channel_state = self.channel_state.lock().unwrap();
 		let mut res = Vec::with_capacity(channel_state.by_id.len());
 		for (channel_id, channel) in channel_state.by_id.iter() {
-			if channel.is_usable() {
+			// Note we use is_live here instead of usable which leads to somewhat confused
+			// internal/external nomenclature, but that's OK because that's probably what the user
+			// really wanted anyway.
+			if channel.is_live() {
 				res.push(ChannelDetails {
 					channel_id: (*channel_id).clone(),
 					short_channel_id: channel.get_short_channel_id(),
@@ -390,9 +492,10 @@ impl ChannelManager {
 	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
 	/// will be accepted on the given channel, and after additional timeout/the closing of all
 	/// pending HTLCs, the channel will be closed on chain.
+	///
 	/// May generate a SendShutdown event on success, which should be relayed.
-	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), HandleError> {
-		let (res, node_id, chan_option) = {
+	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+		let (mut res, node_id, chan_option) = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();
 			match channel_state.by_id.entry(channel_id.clone()) {
@@ -405,12 +508,12 @@ impl ChannelManager {
 					(res, chan_entry.get().get_their_node_id(), Some(chan_entry.remove_entry().1))
 					} else { (res, chan_entry.get().get_their_node_id(), None) }
 				},
-				hash_map::Entry::Vacant(_) => return Err(HandleError{err: "No such channel", action: None})
+				hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
 			}
 		};
-		for payment_hash in res.1 {
+		for htlc_source in res.1.drain(..) {
 			// unknown_next_peer...I dunno who that is anymore....
-			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
 		}
 		let chan_update = if let Some(chan) = chan_option {
 			if let Ok(update) = self.get_channel_update(&chan) {
@@ -433,11 +536,11 @@ impl ChannelManager {
 	}
 
 	#[inline]
-	fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<[u8; 32]>)) {
-		let (local_txn, failed_htlcs) = shutdown_res;
-		for payment_hash in failed_htlcs {
+	fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>)) {
+		let (local_txn, mut failed_htlcs) = shutdown_res;
+		for htlc_source in failed_htlcs.drain(..) {
 			// unknown_next_peer...I dunno who that is anymore....
-			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
 		}
 		for tx in local_txn {
 			self.tx_broadcaster.broadcast_transaction(&tx);
@@ -483,6 +586,33 @@ impl ChannelManager {
 		}
 	}
 
+	fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
+		match err {
+			ChannelMonitorUpdateErr::PermanentFailure => {
+				let mut chan = {
+					let channel_state = channel_state_lock.borrow_parts();
+					let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
+					if let Some(short_id) = chan.get_short_channel_id() {
+						channel_state.short_to_id.remove(&short_id);
+					}
+					chan
+				};
+				mem::drop(channel_state_lock);
+				self.finish_force_close_channel(chan.force_shutdown());
+				let mut events = self.pending_events.lock().unwrap();
+				if let Ok(update) = self.get_channel_update(&chan) {
+					events.push(events::Event::BroadcastChannelUpdate {
+						msg: update
+					});
+				}
+			},
+			ChannelMonitorUpdateErr::TemporaryFailure => {
+				let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
+				channel.monitor_update_failed(reason);
+			},
+		}
+	}
+
 	#[inline]
 	fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) {
 		({
@@ -567,7 +697,7 @@ impl ChannelManager {
 	}
 
 	/// returns the hop data, as well as the first-hop value_msat and CLTV value we should send.
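+	///
+	/// How the returned triple is consumed on the send path (mirroring send_payment below):
+	///
+	///   let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
+	///   let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+	///   // htlc_msat/htlc_cltv are then the amount and expiry offered in the first-hop HTLC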
-	fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec<msgs::OnionHopData>, u64, u32), HandleError> {
+	fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
 		let mut cur_value_msat = 0u64;
 		let mut cur_cltv = starting_htlc_offset;
 		let mut last_short_channel_id = 0;
@@ -592,11 +722,11 @@ impl ChannelManager {
 			};
 			cur_value_msat += hop.fee_msat;
 			if cur_value_msat >= 21000000 * 100000000 * 1000 {
-				return Err(HandleError{err: "Channel fees overflowed?!", action: None});
+				return Err(APIError::RouteError{err: "Channel fees overflowed?!"});
 			}
 			cur_cltv += hop.cltv_expiry_delta as u32;
 			if cur_cltv >= 500000000 {
-				return Err(HandleError{err: "Channel CLTV overflowed?!", action: None});
+				return Err(APIError::RouteError{err: "Channel CLTV overflowed?!"});
 			}
 			last_short_channel_id = hop.short_channel_id;
 		}
@@ -623,7 +753,7 @@ impl ChannelManager {
 	}
 
 	const ZERO:[u8; 21*65] = [0; 21*65];
-	fn construct_onion_packet(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, associated_data: &[u8; 32]) -> Result<msgs::OnionPacket, HandleError> {
+	fn construct_onion_packet(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, associated_data: &[u8; 32]) -> msgs::OnionPacket {
 		let mut buf = Vec::with_capacity(21*65);
 		buf.resize(21*65, 0);
@@ -664,12 +794,12 @@ impl ChannelManager {
 			hmac.raw_result(&mut hmac_res);
 		}
 
-		Ok(msgs::OnionPacket{
+		msgs::OnionPacket{
 			version: 0,
 			public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
 			hop_data: packet_data,
 			hmac: hmac_res,
-		})
+		}
 	}
 
 	/// Encrypts a failure packet. raw_packet can either be a
@@ -722,7 +852,7 @@ impl ChannelManager {
 		ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
 	}
 
-	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, Option<SharedSecret>, MutexGuard<ChannelHolder>) {
+	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
 		macro_rules! get_onion_hash {
 			() => {
 				{
@@ -742,7 +872,7 @@ impl ChannelManager {
 					htlc_id: msg.htlc_id,
 					sha256_of_onion: get_onion_hash!(),
 					failure_code: 0x8000 | 0x4000 | 6,
-				})), None, self.channel_state.lock().unwrap());
+				})), self.channel_state.lock().unwrap());
 			}
 		let shared_secret = SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key);
@@ -760,7 +890,7 @@ impl ChannelManager {
 						channel_id: msg.channel_id,
 						htlc_id: msg.htlc_id,
 						reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
-					})), Some(shared_secret), channel_state.unwrap());
+					})), channel_state.unwrap());
 				}
 			}
 		}
@@ -786,10 +916,10 @@ impl ChannelManager {
 		let next_hop_data = {
 			let mut decoded = [0; 65];
 			chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
-			match msgs::OnionHopData::decode(&decoded[..]) {
+			match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
 				Err(err) => {
 					let error_code = match err {
-						msgs::DecodeError::UnknownRealmByte => 0x4000 | 1,
+						msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
 						_ => 0x2000 | 2, // Should never happen
 					};
 					return_err!("Unable to decode our hop data", error_code, &[0;0]);
@@ -798,13 +928,17 @@ impl ChannelManager {
 			}
 		};
 
-		//TODO: Check that msg.cltv_expiry is within acceptable bounds!
-
 		let pending_forward_info = if next_hop_data.hmac == [0; 32] {
 				// OUR PAYMENT!
- if next_hop_data.data.amt_to_forward != msg.amount_msat { + // final_expiry_too_soon + if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 { + return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); + } + // final_incorrect_htlc_amount + if next_hop_data.data.amt_to_forward > msg.amount_msat { return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); } + // final_incorrect_cltv_expiry if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry { return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); } @@ -818,7 +952,7 @@ impl ChannelManager { onion_packet: None, payment_hash: msg.payment_hash.clone(), short_channel_id: 0, - prev_short_channel_id: 0, + incoming_shared_secret: shared_secret.clone(), amt_to_forward: next_hop_data.data.amt_to_forward, outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, }) @@ -858,7 +992,7 @@ impl ChannelManager { onion_packet: Some(outgoing_packet), payment_hash: msg.payment_hash.clone(), short_channel_id: next_hop_data.data.short_channel_id, - prev_short_channel_id: 0, + incoming_shared_secret: shared_secret.clone(), amt_to_forward: next_hop_data.data.amt_to_forward, outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, }) @@ -869,37 +1003,63 @@ impl ChannelManager { if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); let forwarding_id = match id_option { - None => { + None => { // unknown_next_peer return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]); }, Some(id) => id.clone(), }; - if let Some((err, code, chan_update)) = { + if let Some((err, code, chan_update)) = loop { let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); - if !chan.is_live() { - Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap())) - } else { - let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); - if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { - Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, self.get_channel_update(chan).unwrap())) - } else { - if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { - Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap())) - } else { - None - } - } + + // Note that we could technically not return an error yet here and just hope + // that the connection is reestablished or monitor updated by the time we get + // around to doing the actual forward, but better to fail early if we can and + // hopefully an attacker trying to path-trace payments cannot make this occur + // on a small/per-node/per-channel scale. 
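+					// For orientation: per BOLT 4, failure codes OR flag bits into a type value
+					// (flag values are from the spec, repeated here only as a reader aid):
+					//   0x8000 = BADONION, 0x4000 = PERM, 0x2000 = NODE, 0x1000 = UPDATE
+					// so 0x1000|20 (channel_disabled) must carry a channel_update below, while
+					// 0x4000|10 (unknown_next_peer) is a permanent failure with no update.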
+					if !chan.is_live() { // channel_disabled
+						break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+					}
+					if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
+						break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+					}
+					let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+					if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
+						break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+					}
+					if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+						break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 					}
-				} {
-					return_err!(err, code, &chan_update.encode_with_len()[..]);
+					let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+					// We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
+					if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+						break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+					}
+					if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+						break Some(("CLTV expiry is too far in the future", 21, None));
+					}
+					break None;
+				}
+				{
+					let mut res = Vec::with_capacity(8 + 128);
+					if code == 0x1000 | 11 || code == 0x1000 | 12 {
+						res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+					}
+					else if code == 0x1000 | 13 {
+						res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+					}
+					if let Some(chan_update) = chan_update {
+						res.extend_from_slice(&chan_update.encode_with_len()[..]);
+					}
+					return_err!(err, code, &res[..]);
 				}
 			}
 		}
 
-		(pending_forward_info, Some(shared_secret), channel_state.unwrap())
+		(pending_forward_info, channel_state.unwrap())
 	}
 
 	/// only fails if the channel does not yet have an assigned short_id
+	/// May be called with channel_state already locked!
 	fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
 		let short_channel_id = match chan.get_short_channel_id() {
 			None => return Err(HandleError{err: "Channel not yet established", action: None}),
@@ -921,7 +1081,7 @@ impl ChannelManager {
 		};
 
 		let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
-		let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key); //TODO Can we unwrap here?
+		let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
 
 		Ok(msgs::ChannelUpdate {
 			signature: sig,
@@ -930,24 +1090,29 @@ impl ChannelManager {
 	}
 
 	/// Sends a payment along a given route.
+	///
 	/// Value parameters are provided via the last hop in route, see documentation for RouteHop
 	/// fields for more info.
+	///
 	/// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
 	/// payment), we don't do anything to stop you! We always try to ensure that if the provided
 	/// next hop knows the preimage to payment_hash they can claim an additional amount as
 	/// specified in the last hop in the route! Thus, you should probably do your own
 	/// payment_preimage tracking (which you should already be doing as they represent "proof of
 	/// payment") and prevent double-sends yourself.
-	/// See-also docs on Channel::send_htlc_and_commit.
+	///
 	/// May generate a SendHTLCs event on success, which should be relayed.
-	pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), HandleError> {
+	///
+	/// Raises APIError::RouteError when an invalid route or forwarding parameter
+	/// (cltv_delta, fee, node public key) is specified
+	pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
 		if route.hops.len() < 1 || route.hops.len() > 20 {
-			return Err(HandleError{err: "Route didn't go anywhere/had bogus size", action: None});
+			return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
 		}
 		let our_node_id = self.get_our_node_id();
 		for (idx, hop) in route.hops.iter().enumerate() {
 			if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
-				return Err(HandleError{err: "Route went through us but wasn't a simple rebalance loop to us", action: None});
+				return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
 			}
 		}
@@ -959,52 +1124,59 @@ impl ChannelManager {
 
 		let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 
-		//TODO: This should return something other than HandleError, that's really intended for
-		//p2p-returns only.
 		let onion_keys = secp_call!(ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
-				HandleError{err: "Pubkey along hop was maliciously selected", action: Some(msgs::ErrorAction::IgnoreError)});
+				APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
 		let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
-		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash)?;
+		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 
-		let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = channel_state_lock.borrow_parts();
+		let (first_hop_node_id, update_add, commitment_signed) = {
+			let mut channel_state = self.channel_state.lock().unwrap();
 
 			let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
-				None => return Err(HandleError{err: "No channel available with first hop!", action: None}),
-				Some(id) => id.clone()
+				None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+				Some(id) => id.clone(),
			};

-			let claimable_htlc_entry = channel_state.claimable_htlcs.entry(payment_hash.clone());
-			if let hash_map::Entry::Occupied(_) = claimable_htlc_entry {
-				return Err(HandleError{err: "Already had pending HTLC with the same payment_hash", action: None});
-			}
-
-			let res = {
-				let chan = channel_state.by_id.get_mut(&id).unwrap();
-				if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
-					return Err(HandleError{err: "Node ID mismatch on first hop!", action: None});
+			let res = {
+				let chan = channel_state.by_id.get_mut(&id).unwrap();
+				if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
+					return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+				}
+				if chan.is_awaiting_monitor_update() {
+					return Err(APIError::MonitorUpdateFailed);
+				}
+				if !chan.is_live() {
+					return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+				}
+				chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+					route: route.clone(),
+					session_priv: session_priv.clone(),
+					first_hop_htlc_msat: htlc_msat,
+				}, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
+			};
+			match res {
+				Some((update_add, commitment_signed, chan_monitor)) => {
+					if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+						self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst);
+						return Err(APIError::MonitorUpdateFailed);
+					}
+					Some((update_add, commitment_signed))
+				},
+				None => None,
 			}
 		};
 
 		let first_hop_node_id = route.hops.first().unwrap().pubkey;
 
-		claimable_htlc_entry.or_insert(PendingOutboundHTLC::OutboundRoute {
-			route,
-			session_priv,
-		});
-
 		match res {
-			Some(msgs) => (first_hop_node_id, msgs),
+			Some((update_add, commitment_signed)) => {
+				(first_hop_node_id, update_add, commitment_signed)
+			},
 			None => return Ok(()),
 		}
 	};
 
-		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!(); // maybe remove from claimable_htlcs?
-		}
-
 		let mut events = self.pending_events.lock().unwrap();
 		events.push(events::Event::UpdateHTLCs {
 			node_id: first_hop_node_id,
@@ -1013,6 +1185,7 @@ impl ChannelManager {
 				update_fulfill_htlcs: Vec::new(),
 				update_fail_htlcs: Vec::new(),
 				update_fail_malformed_htlcs: Vec::new(),
+				update_fee: None,
 				commitment_signed,
 			},
 		});
 		Ok(())
 	}
 
 	/// Call this upon creation of a funding transaction for the given channel.
+	///
 	/// Panics if a funding transaction has already been provided for this channel.
+	///
 	/// May panic if the funding_txo is duplicative with some other channel (note that this should
 	/// be trivially prevented by using unique funding transaction keys per-channel).
 	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
-
 		macro_rules! add_pending_event {
 			($event: expr) => {
 				{
@@ -1055,9 +1229,11 @@ impl ChannelManager {
 				},
 				None => return
 			}
-		}; // Release channel lock for install_watch_outpoint call,
+		};
+		// Because we have exclusive ownership of the channel here we can release the channel_state
+		// lock before add_update_monitor
 		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!(); // maybe remove from claimable_htlcs?
+			unimplemented!();
 		}
 		add_pending_event!(events::Event::SendFundingCreated {
 			node_id: chan.get_their_node_id(),
@@ -1094,6 +1270,7 @@ impl ChannelManager {
 	}
 
 	/// Processes HTLCs which are pending waiting on random forward delay.
+	///
 	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
 	/// Will likely generate further events.
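+	///
+	/// Typical wiring, as a sketch (assumes an event loop draining events from this
+	/// ChannelManager; the scheduling helper named here is hypothetical):
+	///
+	///   // on events::Event::PendingHTLCsForwardable { time_forwardable } => wait roughly that
+	///   // long (randomized), then:
+	///   schedule_after(time_forwardable, || channel_manager.process_pending_htlc_forwards());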
pub fn process_pending_htlc_forwards(&self) { @@ -1107,14 +1284,19 @@ impl ChannelManager { return; } - for (short_chan_id, pending_forwards) in channel_state.forward_htlcs.drain() { + for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { if short_chan_id != 0 { let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) { Some(chan_id) => chan_id.clone(), None => { failed_forwards.reserve(pending_forwards.len()); - for forward_info in pending_forwards { - failed_forwards.push((forward_info.payment_hash, 0x4000 | 10, None)); + for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: forward_info.incoming_shared_secret, + }); + failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None)); } continue; } @@ -1122,11 +1304,16 @@ impl ChannelManager { let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap(); let mut add_htlc_msgs = Vec::new(); - for forward_info in pending_forwards { - match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, forward_info.onion_packet.unwrap()) { + for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: forward_info.incoming_shared_secret, + }); + match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) { Err(_e) => { let chan_update = self.get_channel_update(forward_chan).unwrap(); - failed_forwards.push((forward_info.payment_hash, 0x1000 | 7, Some(chan_update))); + failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update))); continue; }, Ok(update_add) => { @@ -1159,56 +1346,66 @@ impl ChannelManager { continue; }, }; - new_events.push((Some(monitor), events::Event::UpdateHTLCs { + if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + unimplemented!();// but def dont push the event... + } + new_events.push(events::Event::UpdateHTLCs { node_id: forward_chan.get_their_node_id(), updates: msgs::CommitmentUpdate { update_add_htlcs: add_htlc_msgs, update_fulfill_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, }, - })); + }); } } else { - for forward_info in pending_forwards { - new_events.push((None, events::Event::PaymentReceived { + for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) 
{
+					let prev_hop_data = HTLCPreviousHopData {
+						short_channel_id: prev_short_channel_id,
+						htlc_id: prev_htlc_id,
+						incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+					};
+					match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
+						hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
+						hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
+					};
+					new_events.push(events::Event::PaymentReceived {
 						payment_hash: forward_info.payment_hash,
 						amt: forward_info.amt_to_forward,
-					}));
+					});
 				}
 			}
 		}
 	}
 
-	for failed_forward in failed_forwards.drain(..) {
-		match failed_forward.2 {
-			None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: Vec::new() }),
-			Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failed_forward.0, HTLCFailReason::Reason { failure_code: failed_forward.1, data: chan_update.encode_with_len() }),
+	for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
+		match update {
+			None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
+			Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
 		};
 	}
 
 	if new_events.is_empty() { return }
-
-	new_events.retain(|event| {
-		if let &Some(ref monitor) = &event.0 {
-			if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) {
-				unimplemented!();// but def dont push the event...
-			}
-		}
-		true
-	});
-
 	let mut events = self.pending_events.lock().unwrap();
-	events.reserve(new_events.len());
-	for event in new_events.drain(..) {
-		events.push(event.1);
-	}
+	events.append(&mut new_events);
 }
 
 /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
 pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
-	self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() })
+	// TODO: Add ability to return 0x4000|16 (incorrect_payment_amount) if the amount we
+	// received is < expected or > 2*expected
+	let mut channel_state = Some(self.channel_state.lock().unwrap());
+	let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
+	if let Some(mut sources) = removed_source {
+		for htlc_with_hash in sources.drain(..) {
+			if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+			self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() });
+		}
+		true
+	} else { false }
 }
 
 /// Fails an HTLC backwards to its sender, i.e. back toward the node that sent it to us.
@@ -1217,37 +1414,27 @@ impl ChannelManager {
 	/// to fail and take the channel_state lock for each iteration (as we take ownership and may
 	/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
 	/// still-available channels.
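+	///
+	/// Sketch of where each HTLCSource variant lands: an OutboundRoute failure surfaces to the
+	/// user as
+	///   events::Event::PaymentFailed { payment_hash, rejected_by_dest }
+	/// while a PreviousHopData failure becomes an update_fail_htlc/commitment_signed pair sent
+	/// back along the inbound channel.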
-	fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, payment_hash: &[u8; 32], onion_error: HTLCFailReason) -> bool {
-		let mut pending_htlc = {
-			match channel_state.claimable_htlcs.remove(payment_hash) {
-				Some(pending_htlc) => pending_htlc,
-				None => return false,
-			}
-		};
-
-		match pending_htlc {
-			PendingOutboundHTLC::CycledRoute { source_short_channel_id, incoming_packet_shared_secret, route, session_priv } => {
-				channel_state.claimable_htlcs.insert(payment_hash.clone(), PendingOutboundHTLC::OutboundRoute {
-					route,
-					session_priv,
-				});
-				pending_htlc = PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, incoming_packet_shared_secret };
-			},
-			_ => {}
-		}
-
-		match pending_htlc {
-			PendingOutboundHTLC::CycledRoute { .. } => unreachable!(),
-			PendingOutboundHTLC::OutboundRoute { .. } => {
+	fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) {
+		match source {
+			HTLCSource::OutboundRoute { .. } => {
 				mem::drop(channel_state);
-
-				let mut pending_events = self.pending_events.lock().unwrap();
-				pending_events.push(events::Event::PaymentFailed {
-					payment_hash: payment_hash.clone()
-				});
-				false
+				if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
+					let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
+					let mut pending_events = self.pending_events.lock().unwrap();
+					if let Some(channel_update) = channel_update {
+						pending_events.push(events::Event::PaymentFailureNetworkUpdate {
+							update: channel_update,
+						});
+					}
+					pending_events.push(events::Event::PaymentFailed {
+						payment_hash: payment_hash.clone(),
+						rejected_by_dest: !payment_retryable,
+					});
+				} else {
+					panic!("should have onion error packet here");
+				}
 			},
-			PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, incoming_packet_shared_secret } => {
+			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
 				let err_packet = match onion_error {
 					HTLCFailReason::Reason { failure_code, data } => {
 						let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
@@ -1259,29 +1446,31 @@ impl ChannelManager {
 				};
 
 				let (node_id, fail_msgs) = {
-					let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) {
+					let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
 						Some(chan_id) => chan_id.clone(),
-						None => return false
+						None => return
 					};
 
 					let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-					match chan.get_update_fail_htlc_and_commit(payment_hash, err_packet) {
-						Ok(msg) => (chan.get_their_node_id(), msg),
+					match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
+						Ok(Some((msg, commitment_msg, chan_monitor))) => {
+							if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+								unimplemented!();
+							}
+							(chan.get_their_node_id(), Some((msg, commitment_msg)))
+						},
+						Ok(None) => (chan.get_their_node_id(), None),
 						Err(_e) => {
 							//TODO: Do something with e?
-							return false;
+							return;
 						},
 					}
 				};
 
 				match fail_msgs {
-					Some((msg, commitment_msg, chan_monitor)) => {
+					Some((msg, commitment_msg)) => {
 						mem::drop(channel_state);
 
-						if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-							unimplemented!();// but def dont push the event...
-						}
-
 						let mut pending_events = self.pending_events.lock().unwrap();
 						pending_events.push(events::Event::UpdateHTLCs {
 							node_id,
@@ -1290,14 +1479,13 @@ impl ChannelManager {
 								update_fulfill_htlcs: Vec::new(),
 								update_fail_htlcs: vec![msg],
 								update_fail_malformed_htlcs: Vec::new(),
+								update_fee: None,
 								commitment_signed: commitment_msg,
 							},
 						});
 					},
 					None => {},
 				}
-
-				true
 			},
 		}
 	}
@@ -1305,83 +1493,66 @@ impl ChannelManager {
 	/// Provides a payment preimage in response to a PaymentReceived event, returning true and
 	/// generating message events for the net layer to claim the payment, if possible. Thus, you
 	/// should probably kick the net layer to go send messages if this returns true!
+	///
 	/// May panic if called except in response to a PaymentReceived event.
 	pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
-		self.claim_funds_internal(payment_preimage, true)
-	}
-	fn claim_funds_internal(&self, payment_preimage: [u8; 32], from_user: bool) -> bool {
 		let mut sha = Sha256::new();
 		sha.input(&payment_preimage);
 		let mut payment_hash = [0; 32];
 		sha.result(&mut payment_hash);
 
-		let mut channel_state = self.channel_state.lock().unwrap();
-		let mut pending_htlc = {
-			match channel_state.claimable_htlcs.remove(&payment_hash) {
-				Some(pending_htlc) => pending_htlc,
-				None => return false,
+		let mut channel_state = Some(self.channel_state.lock().unwrap());
+		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
+		if let Some(mut sources) = removed_source {
+			for htlc_with_hash in sources.drain(..) {
+				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+				self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
 			}
-		};
-
-		match pending_htlc {
-			PendingOutboundHTLC::CycledRoute { source_short_channel_id, incoming_packet_shared_secret, route, session_priv } => {
-				if from_user { // This was the end hop back to us
-					pending_htlc = PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, incoming_packet_shared_secret };
-					channel_state.claimable_htlcs.insert(payment_hash, PendingOutboundHTLC::OutboundRoute { route, session_priv });
-				} else { // This came from the first upstream node
-					// Bank error in our favor! Maybe we should tell the user this somehow???
-					pending_htlc = PendingOutboundHTLC::OutboundRoute { route, session_priv };
-					channel_state.claimable_htlcs.insert(payment_hash, PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, incoming_packet_shared_secret });
-				}
-			},
-			_ => {},
-		}
-
-		match pending_htlc {
-			PendingOutboundHTLC::CycledRoute { .. } => unreachable!(),
-			PendingOutboundHTLC::OutboundRoute { .. } => {
-				if from_user {
-					panic!("Called claim_funds with a preimage for an outgoing payment. There is nothing we can do with this, and something is seriously wrong if you knew this...");
-				}
+			true
+		} else { false }
+	}
+	fn claim_funds_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: [u8; 32]) {
+		match source {
+			HTLCSource::OutboundRoute { .. } => {
 				mem::drop(channel_state);
 				let mut pending_events = self.pending_events.lock().unwrap();
 				pending_events.push(events::Event::PaymentSent {
 					payment_preimage
 				});
-				false
 			},
-			PendingOutboundHTLC::IntermediaryHopData { source_short_channel_id, .. } => {
+			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+				//TODO: Delay the claimed_funds relaying just like we do outbound relay!
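+				// Flow sketch: look up the inbound channel by short_channel_id, hand the
+				// preimage to Channel::get_update_fulfill_htlc_and_commit (below), and relay
+				// the resulting update_fulfill_htlc/commitment_signed pair via an UpdateHTLCs
+				// event.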
let (node_id, fulfill_msgs) = { - let chan_id = match channel_state.short_to_id.get(&source_short_channel_id) { + let chan_id = match channel_state.short_to_id.get(&short_channel_id) { Some(chan_id) => chan_id.clone(), None => { // TODO: There is probably a channel manager somewhere that needs to // learn the preimage as the channel already hit the chain and that's // why its missing. - return false + return } }; let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); - match chan.get_update_fulfill_htlc_and_commit(payment_preimage) { - Ok(msg) => (chan.get_their_node_id(), msg), + match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { + Ok((msgs, Some(chan_monitor))) => { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!();// but def dont push the event... + } + (chan.get_their_node_id(), msgs) + }, + Ok((msgs, None)) => (chan.get_their_node_id(), msgs), Err(_e) => { // TODO: There is probably a channel manager somewhere that needs to // learn the preimage as the channel may be about to hit the chain. //TODO: Do something with e? - return false; + return }, } }; mem::drop(channel_state); - if let Some(chan_monitor) = fulfill_msgs.1 { - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!();// but def dont push the event... - } - } - - if let Some((msg, commitment_msg)) = fulfill_msgs.0 { + if let Some((msg, commitment_msg)) = fulfill_msgs { let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::UpdateHTLCs { node_id: node_id, @@ -1390,11 +1561,11 @@ impl ChannelManager { update_fulfill_htlcs: vec![msg], update_fail_htlcs: Vec::new(), update_fail_malformed_htlcs: Vec::new(), + update_fee: None, commitment_signed: commitment_msg, } }); } - true }, } } @@ -1408,7 +1579,83 @@ impl ChannelManager { /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update /// operation. pub fn test_restore_channel_monitor(&self) { - unimplemented!(); + let mut new_events = Vec::new(); + let mut close_results = Vec::new(); + let mut htlc_forwards = Vec::new(); + let mut htlc_failures = Vec::new(); + + { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + let short_to_id = channel_state.short_to_id; + channel_state.by_id.retain(|_, channel| { + if channel.is_awaiting_monitor_update() { + let chan_monitor = channel.channel_monitor(); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + match e { + ChannelMonitorUpdateErr::PermanentFailure => { + if let Some(short_id) = channel.get_short_channel_id() { + short_to_id.remove(&short_id); + } + close_results.push(channel.force_shutdown()); + if let Ok(update) = self.get_channel_update(&channel) { + new_events.push(events::Event::BroadcastChannelUpdate { + msg: update + }); + } + false + }, + ChannelMonitorUpdateErr::TemporaryFailure => true, + } + } else { + let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored(); + if !pending_forwards.is_empty() { + htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards)); + } + htlc_failures.append(&mut pending_failures); + + macro_rules! 
handle_cs { () => {
+						if let Some(update) = commitment_update {
+							new_events.push(events::Event::UpdateHTLCs {
+								node_id: channel.get_their_node_id(),
+								updates: update,
+							});
+						}
+					} }
+					macro_rules! handle_raa { () => {
+						if let Some(revoke_and_ack) = raa {
+							new_events.push(events::Event::SendRevokeAndACK {
+								node_id: channel.get_their_node_id(),
+								msg: revoke_and_ack,
+							});
+						}
+					} }
+					match order {
+						RAACommitmentOrder::CommitmentFirst => {
+							handle_cs!();
+							handle_raa!();
+						},
+						RAACommitmentOrder::RevokeAndACKFirst => {
+							handle_raa!();
+							handle_cs!();
+						},
+					}
+					true
+				}
+			} else { true }
+		});
+	}
+
+	for failure in htlc_failures.drain(..) {
+		self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+	}
+	self.forward_htlcs(&mut htlc_forwards[..]);
+
+	for res in close_results.drain(..) {
+		self.finish_force_close_channel(res);
+	}
+
+	self.pending_events.lock().unwrap().append(&mut new_events);
 }
 
 fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, MsgHandleErrInternal> {
@@ -1440,7 +1687,8 @@ impl ChannelManager {
 		}
 	};
 
-	let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)).map_err(|e| MsgHandleErrInternal::from_no_close(e))?;
+	let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
+		.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
 	let accept_msg = channel.get_accept_channel();
 	channel_state.by_id.insert(channel.channel_id(), channel);
 	Ok(accept_msg)
@@ -1455,7 +1703,8 @@ impl ChannelManager {
 			//TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
 			return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
 		}
-		chan.accept_channel(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+		chan.accept_channel(&msg)
+			.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
 		(chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
 	},
 	//TODO: same as above
@@ -1492,10 +1741,9 @@ impl ChannelManager {
 		},
 		hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
 	}
-	}; // Release channel lock for install_watch_outpoint call,
-	// note that this means if the remote end is misbehaving and sends a message for the same
-	// channel back-to-back with funding_created, we'll end up thinking they sent a message
-	// for a bogus channel.
+	};
+	// Because we have exclusive ownership of the channel here we can release the channel_state
+	// lock before add_update_monitor
 	if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
 		unimplemented!();
 	}
@@ -1512,7 +1760,7 @@ impl ChannelManager {
 }
 
 fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-	let (funding_txo, user_id, monitor) = {
+	let (funding_txo, user_id) = {
 		let mut channel_state = self.channel_state.lock().unwrap();
 		match channel_state.by_id.get_mut(&msg.channel_id) {
 			Some(chan) => {
@@ -1521,14 +1769,14 @@ impl ChannelManager {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
 				let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
-				(chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
+				if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+					unimplemented!();
+				}
+				(chan.get_funding_txo().unwrap(), chan.get_user_id())
 			},
 			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 	};
-	if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-		unimplemented!();
-	}
 	let mut pending_events = self.pending_events.lock().unwrap();
 	pending_events.push(events::Event::FundingBroadcastSafe {
 		funding_txo: funding_txo,
@@ -1545,7 +1793,8 @@ impl ChannelManager {
 			//TODO: here and below MsgHandleErrInternal, #153 case
 			return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 		}
-		chan.funding_locked(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+		chan.funding_locked(&msg)
+			.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
 		return Ok(self.get_announcement_sigs(chan));
 	},
 	None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 }
 }
 
 fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), MsgHandleErrInternal> {
-	let (res, chan_option) = {
+	let (mut res, chan_option) = {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
@@ -1574,9 +1823,9 @@ impl ChannelManager {
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 	};
-	for payment_hash in res.2 {
+	for htlc_source in res.2.drain(..) {
 		// unknown_next_peer...I dunno who that is anymore....
-		self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+		self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
 	}
 	if let Some(chan) = chan_option {
 		if let Ok(update) = self.get_channel_update(&chan) {
@@ -1639,38 +1888,10 @@ impl ChannelManager {
 		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
 		//but we should prevent it anyway.
- let (mut pending_forward_info, shared_secret, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); + let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = channel_state_lock.borrow_parts(); - let claimable_htlcs_entry = channel_state.claimable_htlcs.entry(msg.payment_hash.clone()); - - // We dont correctly handle payments that route through us twice on their way to their - // destination. That's OK since those nodes are probably busted or trying to do network - // mapping through repeated loops. In either case, we want them to stop talking to us, so - // we send permanent_node_failure. - let mut will_forward = false; - if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { short_channel_id, .. }) = pending_forward_info { - if let &hash_map::Entry::Occupied(ref e) = &claimable_htlcs_entry { - let mut acceptable_cycle = false; - if let &PendingOutboundHTLC::OutboundRoute { .. } = e.get() { - acceptable_cycle = short_channel_id == 0; - } - if !acceptable_cycle { - log_info!(self, "Failed to accept incoming HTLC: Payment looped through us twice"); - pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason: ChannelManager::build_first_hop_failure_packet(&shared_secret.unwrap(), 0x4000 | 0x2000 | 2, &[0;0]), - })); - } else { - will_forward = true; - } - } else { - will_forward = true; - } - } - - let (source_short_channel_id, res) = match channel_state.by_id.get_mut(&msg.channel_id) { + match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { if chan.get_their_node_id() != *their_node_id { //TODO: here MsgHandleErrInternal, #153 case @@ -1679,140 +1900,227 @@ impl ChannelManager { if !chan.is_usable() { return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Channel not yet available for receiving HTLCs", action: Some(msgs::ErrorAction::IgnoreError)})); } - let short_channel_id = chan.get_short_channel_id().unwrap(); - if let PendingHTLCStatus::Forward(ref mut forward_info) = pending_forward_info { - forward_info.prev_short_channel_id = short_channel_id; - } - (short_channel_id, chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?) + chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - }; - - if will_forward { - match claimable_htlcs_entry { - hash_map::Entry::Occupied(mut e) => { - let outbound_route = e.get_mut(); - let (route, session_priv) = match outbound_route { - &mut PendingOutboundHTLC::OutboundRoute { ref route, ref session_priv } => { - (route.clone(), session_priv.clone()) - }, - _ => unreachable!(), - }; - *outbound_route = PendingOutboundHTLC::CycledRoute { - source_short_channel_id, - incoming_packet_shared_secret: shared_secret.unwrap(), - route, - session_priv, - }; - }, - hash_map::Entry::Vacant(e) => { - e.insert(PendingOutboundHTLC::IntermediaryHopData { - source_short_channel_id, - incoming_packet_shared_secret: shared_secret.unwrap(), - }); - } - } } - - Ok(res) } fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { - //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
- // Claim funds first, cause we don't really care if the channel we received the message on - // is broken, we may have enough info to get our own money! - self.claim_funds_internal(msg.payment_preimage.clone(), false); - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { + let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { if chan.get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fulfill_htlc(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + chan.update_fulfill_htlc(&msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone() }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - } + }; + self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone()); + Ok(()) } - fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result, MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - let payment_hash = match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - }?; + // Process failure we got back from upstream on a payment we sent. Returns the update to hand + // to the route handler, if any, and a boolean indicating whether the payment may be retried + fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec) -> (Option, bool) { + if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source { + macro_rules!
onion_failure_log { + ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => { + log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value); + }; + ( $error_code_textual: expr, $error_code: expr ) => { + log_trace!(self, "{}({})", $error_code_textual, $error_code); + }; + } + + const BADONION: u16 = 0x8000; + const PERM: u16 = 0x4000; + const UPDATE: u16 = 0x1000; + + let mut res = None; + let mut htlc_msat = *first_hop_htlc_msat; + + // Handle packed channel/node updates for passing back for the route handler + Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| { + if res.is_some() { return; } + + let incoming_htlc_msat = htlc_msat; + let amt_to_forward = htlc_msat - route_hop.fee_msat; + htlc_msat = amt_to_forward; + + let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); + + let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); + decryption_tmp.resize(packet_decrypted.len(), 0); + let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]); + chacha.process(&packet_decrypted, &mut decryption_tmp[..]); + packet_decrypted = decryption_tmp; + + let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey; + + if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) { + let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); + let mut hmac = Hmac::new(Sha256::new(), &um); + hmac.input(&err_packet.encode()[32..]); + let mut calc_tag = [0u8; 32]; + hmac.raw_result(&mut calc_tag); + + if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) { + if err_packet.failuremsg.len() < 2 { + // Useless packet that we can't use but it passed HMAC, so it + // definitely came from the peer in question + res = Some((None, !is_from_final_node)); + } else { + let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]); + + match error_code & 0xff { + 1|2|3 => { + // either from an intermediate or final node + // invalid_realm(PERM|1), + // temporary_node_failure(NODE|2) + // permanent_node_failure(PERM|NODE|2) + // required_node_feature_mssing(PERM|NODE|3) + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: error_code & PERM == PERM, + }), !(error_code & PERM == PERM && is_from_final_node))); + // node returning invalid_realm is removed from network_map, + // although NODE flag is not set, TODO: or remove channel only? + // retry payment when removed node is not a final node + return; + }, + _ => {} + } + + if is_from_final_node { + let payment_retryable = match error_code { + c if c == PERM|15 => false, // unknown_payment_hash + c if c == PERM|16 => false, // incorrect_payment_amount + 17 => true, // final_expiry_too_soon + 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry + let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]); + true + }, + 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount + let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]); + true + }, + _ => { + // A final node has sent us either an invalid code or an error_code that + // MUST be sent from the processing node, or the formmat of failuremsg + // does not coform to the spec. 
+ // Remove it from the network map and don't retry the payment + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false)); + return; + } + }; + res = Some((None, payment_retryable)); + return; + } - if let Some(pending_htlc) = channel_state.claimable_htlcs.get(&payment_hash) { - match pending_htlc { - &PendingOutboundHTLC::OutboundRoute { ref route, ref session_priv } => { - // Handle packed channel/node updates for passing back for the route handler - let mut packet_decrypted = msg.reason.data.clone(); - let mut res = None; - Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| { - if res.is_some() { return; } - - let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); - - let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); - decryption_tmp.resize(packet_decrypted.len(), 0); - let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]); - chacha.process(&packet_decrypted, &mut decryption_tmp[..]); - packet_decrypted = decryption_tmp; - - if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::decode(&packet_decrypted) { - if err_packet.failuremsg.len() >= 2 { - let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); - - let mut hmac = Hmac::new(Sha256::new(), &um); - hmac.input(&err_packet.encode()[32..]); - let mut calc_tag = [0u8; 32]; - hmac.raw_result(&mut calc_tag); - if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) { - const UNKNOWN_CHAN: u16 = 0x4000|10; - const TEMP_CHAN_FAILURE: u16 = 0x4000|7; - match byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]) { - TEMP_CHAN_FAILURE => { - if err_packet.failuremsg.len() >= 4 { - let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[2..4]) as usize; - if err_packet.failuremsg.len() >= 4 + update_len { - if let Ok(chan_update) = msgs::ChannelUpdate::decode(&err_packet.failuremsg[4..4 + update_len]) { - res = Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { - msg: chan_update, - }); + // From here on, error_code must have come from an intermediate node + match error_code { + _c if error_code & PERM == PERM => { + res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed { + short_channel_id: route_hop.short_channel_id, + is_permanent: true, + }), false)); + }, + _c if error_code & UPDATE == UPDATE => { + let offset = match error_code { + c if c == UPDATE|7 => 0, // temporary_channel_failure + c if c == UPDATE|11 => 8, // amount_below_minimum + c if c == UPDATE|12 => 8, // fee_insufficient + c if c == UPDATE|13 => 4, // incorrect_cltv_expiry + c if c == UPDATE|14 => 0, // expiry_too_soon + c if c == UPDATE|20 => 2, // channel_disabled + _ => { + // The node sent an unknown error code + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false)); + return; + } + }; + + if err_packet.failuremsg.len() >= offset + 2 { + let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize; + if err_packet.failuremsg.len() >= offset + 4 + update_len { + if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) { + // if channel_update should NOT have caused the failure: + // MAY treat the channel_update as invalid.
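+ // Illustrative arithmetic for the fee_insufficient arm below (the numbers are hypothetical, not + // from the spec): the advertised forwarding fee is fee_base_msat + amt_to_forward * + // fee_proportional_millionths / 1_000_000, so with fee_base_msat = 1_000 and + // fee_proportional_millionths = 100, forwarding amt_to_forward = 1_000_000 msat costs + // 1_000 + 100 = 1_100 msat. If incoming_htlc_msat already covered amt_to_forward plus that fee + // (i.e. was at least 1_001_100 msat), the attached channel_update could not have justified the + // failure and is treated as invalid.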
+ let is_chan_update_invalid = match error_code { + c if c == UPDATE|7 => { // temporary_channel_failure + false + }, + c if c == UPDATE|11 => { // amount_below_minimum + let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]); + onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat); + incoming_htlc_msat > chan_update.contents.htlc_minimum_msat + }, + c if c == UPDATE|12 => { // fee_insufficient + let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]); + let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) }); + onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat); + new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap() } - } + c if c == UPDATE|13 => { // incorrect_cltv_expiry + let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]); + onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry); + route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta + }, + c if c == UPDATE|20 => { // channel_disabled + let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]); + onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags); + chan_update.contents.flags & 0x01 == 0x01 + }, + c if c == UPDATE|21 => true, // expiry_too_far + _ => { unreachable!(); }, + }; + + let msg = if is_chan_update_invalid { None } else { + Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { + msg: chan_update, + }) + }; + res = Some((msg, true)); + return; } - }, - UNKNOWN_CHAN => { - // No such next-hop. We know this came from the - // current node as the HMAC validated. - res = Some(msgs::HTLCFailChannelUpdate::ChannelClosed { - short_channel_id: route_hop.short_channel_id - }); - }, - _ => {}, //TODO: Enumerate all of these! 
+ } } + }, + _c if error_code & BADONION == BADONION => { + //TODO + }, + 14 => { // expiry_too_soon + res = Some((None, true)); + return; + } + _ => { + // node sending unknown code + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false)); + return; } } } - }).unwrap(); - Ok(res) - }, - _ => { Ok(None) }, - } - } else { - Ok(None) - } + } + } + }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?"); + res.unwrap_or((None, true)) + } else { ((None, true)) } } - fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { + fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { @@ -1820,14 +2128,35 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - } + }?; + Ok(()) + } + + fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.get_mut(&msg.channel_id) { + Some(chan) => { + if chan.get_their_node_id() != *their_node_id { + //TODO: here and below MsgHandleErrInternal, #153 case + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + if (msg.failure_code & 0x8000) != 0 { + return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION set", msg.channel_id)); + } + chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + Ok(()) + }, + None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } } fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option), MsgHandleErrInternal> { - let (revoke_and_ack, commitment_signed, chan_monitor) = { + let (revoke_and_ack, commitment_signed) = { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { @@ -1835,20 +2164,53 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))? 
+ let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + (revoke_and_ack, commitment_signed) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } - Ok((revoke_and_ack, commitment_signed)) } + #[inline] + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) { + for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards { + let mut forward_event = None; + if !pending_forwards.is_empty() { + let mut channel_state = self.channel_state.lock().unwrap(); + if channel_state.forward_htlcs.is_empty() { + forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64)); + channel_state.next_forward = forward_event.unwrap(); + } + for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { + match channel_state.forward_htlcs.entry(forward_info.short_channel_id) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info })); + } + } + } + } + match forward_event { + Some(time) => { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PendingHTLCsForwardable { + time_forwardable: time + }); + } + None => {}, + } + } + } + fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result, MsgHandleErrInternal> { - let (res, mut pending_forwards, mut pending_failures, chan_monitor) = { + let ((res, pending_forwards, mut pending_failures), short_channel_id) = { let mut channel_state = self.channel_state.lock().unwrap(); match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { @@ -1856,47 +2218,35 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))? + let (res, pending_forwards, pending_failures, chan_monitor) = chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + ((res, pending_forwards, pending_failures), chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel")) }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); - } for failure in pending_failures.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &failure.0, failure.1); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); } + self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]); - let mut forward_event = None; - if !pending_forwards.is_empty() { - let mut channel_state = self.channel_state.lock().unwrap(); - if channel_state.forward_htlcs.is_empty() { - forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64)); - channel_state.next_forward = forward_event.unwrap(); - } - for forward_info in pending_forwards.drain(..) { - match channel_state.forward_htlcs.entry(forward_info.short_channel_id) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(forward_info); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(forward_info)); - } + Ok(res) + } + + fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.get_mut(&msg.channel_id) { + Some(chan) => { + if chan.get_their_node_id() != *their_node_id { + //TODO: here and below MsgHandleErrInternal, #153 case + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - } - } - match forward_event { - Some(time) => { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PendingHTLCsForwardable { - time_forwardable: time - }); - } - None => {}, + chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + }, + None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } - - Ok(res) } fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { @@ -1913,7 +2263,7 @@ impl ChannelManager { let our_node_id = self.get_our_node_id(); let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()) - .map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; let were_node_one = announcement.node_id_1 == our_node_id; let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); @@ -1939,7 +2289,70 @@ impl ChannelManager { Ok(()) } + fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option, RAACommitmentOrder), MsgHandleErrInternal> { + let res = { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.get_mut(&msg.channel_id) { + Some(chan) => { + if chan.get_their_node_id() != *their_node_id { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg) + .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + if let Some(monitor) = channel_monitor { + if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + unimplemented!(); + } + } + Ok((funding_locked, 
revoke_and_ack, commitment_update, order)) + }, + None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } + }; + + res + } + /// Begins the update_fee process. Allowed only on an outbound channel. + /// If successful, will generate an UpdateHTLCs event, so you should probably poll + /// PeerManager::process_events afterwards. + /// Note: This API is likely to change! + #[doc(hidden)] + pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.get_mut(&channel_id) { + None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), + Some(chan) => { + if !chan.is_outbound() { + return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); + } + if chan.is_awaiting_monitor_update() { + return Err(APIError::MonitorUpdateFailed); + } + if !chan.is_live() { + return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); + } + if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::UpdateHTLCs { + node_id: chan.get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + } + }, + } + Ok(()) + } } impl events::EventsProvider for ChannelManager { @@ -2136,7 +2549,7 @@ handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id) } - fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result, HandleError> { + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> { handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id) } @@ -2153,25 +2566,21 @@ } fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None}) - } - chan.update_fee(&*self.fee_estimator, &msg) - }, - None => return Err(HandleError{err: "Failed to find corresponding channel", action: None}) - } + handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id) } fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> { handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id) } + fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option, Option, Option, RAACommitmentOrder), HandleError> { + handle_error!(self,
self.internal_channel_reestablish(their_node_id, msg), their_node_id) + } + fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) { let mut new_events = Vec::new(); let mut failed_channels = Vec::new(); + let mut failed_payments = Vec::new(); { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); @@ -2194,13 +2603,23 @@ impl ChannelMessageHandler for ChannelManager { } }); } else { - for chan in channel_state.by_id { - if chan.1.get_their_node_id() == *their_node_id { - //TODO: mark channel disabled (and maybe announce such after a timeout). Also - //fail and wipe any uncommitted outbound HTLCs as those are considered after - //reconnect. + channel_state.by_id.retain(|_, chan| { + if chan.get_their_node_id() == *their_node_id { + //TODO: mark channel disabled (and maybe announce such after a timeout). + let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(); + if !failed_adds.is_empty() { + let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe + failed_payments.push((chan_update, failed_adds)); + } + if chan.is_shutdown() { + if let Some(short_id) = chan.get_short_channel_id() { + short_to_id.remove(&short_id); + } + return false; + } } - } + true + }) } } for failure in failed_channels.drain(..) { @@ -2212,6 +2631,32 @@ impl ChannelMessageHandler for ChannelManager { pending_events.push(event); } } + for (chan_update, mut htlc_sources) in failed_payments { + for (htlc_source, payment_hash) in htlc_sources.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() }); + } + } + } + + fn peer_connected(&self, their_node_id: &PublicKey) -> Vec { + let mut res = Vec::new(); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.by_id.retain(|_, chan| { + if chan.get_their_node_id() == *their_node_id { + if !chan.have_received_message() { + // If we created this (outbound) channel while we were disconnected from the + // peer we probably failed to send the open_channel message, which is now + // lost. We can't have had anything pending related to this channel, so we just + // drop it. 
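+ // Note: returning false from this retain() closure removes the channel entry from by_id entirely.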
+ false + } else { + res.push(chan.get_channel_reestablish()); + true + } + } else { true } + }); + //TODO: Also re-broadcast announcement_signatures + res } fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) { @@ -2233,12 +2678,15 @@ mod tests { use chain::transaction::OutPoint; use chain::chaininterface::ChainListener; use ln::channelmanager::{ChannelManager,OnionKeys}; + use ln::channelmonitor::{ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; use ln::router::{Route, RouteHop, Router}; use ln::msgs; - use ln::msgs::{MsgEncodable,ChannelMessageHandler,RoutingMessageHandler}; + use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; use util::test_utils; use util::events::{Event, EventsProvider}; + use util::errors::APIError; use util::logger::Logger; + use util::ser::Writeable; use bitcoin::util::hash::Sha256dHash; use bitcoin::blockdata::block::{Block, BlockHeader}; @@ -2258,9 +2706,12 @@ mod tests { use rand::{thread_rng,Rng}; - use std::collections::HashMap; + use std::cell::RefCell; + use std::collections::{BTreeSet, HashMap}; use std::default::Default; + use std::rc::Rc; use std::sync::{Arc, Mutex}; + use std::sync::atomic::Ordering; use std::time::Instant; use std::mem; @@ -2384,7 +2835,7 @@ mod tests { }, ); - let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]).unwrap(); + let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]); // Just check the final packet encoding, as it includes all the per-hop vectors in it // anyway... assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94
b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap()); @@ -2430,11 +2881,31 @@ mod tests { chan_monitor: Arc, node: Arc, router: Router, + network_payment_count: Rc>, + network_chan_count: Rc>, + } + impl Drop for Node { + fn drop(&mut self) { + if !::std::thread::panicking() { + // Check that we processed all pending events + assert_eq!(self.node.get_and_clear_pending_events().len(), 0); + assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0); + } + } } - static mut CHAN_COUNT: u32 = 0; fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { - node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 10001, 42).unwrap(); + create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001) + } + + fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { + let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat); + let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked); + (announcement, as_update, bs_update, channel_id, tx) + } + + fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction { + node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap(); let events_1 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_1.len(), 1); @@ -2448,7 +2919,7 @@ mod tests { node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_chan).unwrap(); - let chan_id = unsafe { CHAN_COUNT }; + let chan_id = *node_a.network_chan_count.borrow(); let tx; let funding_output; @@ -2456,7 +2927,7 @@ mod tests { assert_eq!(events_2.len(), 1); match events_2[0] { Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => { - assert_eq!(*channel_value_satoshis, 100000); + assert_eq!(*channel_value_satoshis, channel_value); assert_eq!(user_channel_id, 42); tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut { @@ -2506,47 +2977,63 @@ mod tests { _ => panic!("Unexpected event"), }; - confirm_transaction(&node_a.chain_monitor, &tx, chan_id); - let events_5 = node_a.node.get_and_clear_pending_events(); + tx + } + + fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) { + confirm_transaction(&node_b.chain_monitor, &tx, tx.version); + let events_5 = 
node_b.node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_b.node.get_our_node_id()); + assert_eq!(*node_id, node_a.node.get_our_node_id()); assert!(announcement_sigs.is_none()); - node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap() + node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap() }, _ => panic!("Unexpected event"), }; let channel_id; - confirm_transaction(&node_b.chain_monitor, &tx, chan_id); - let events_6 = node_b.node.get_and_clear_pending_events(); + confirm_transaction(&node_a.chain_monitor, &tx, tx.version); + let events_6 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_6.len(), 1); - let as_announcement_sigs = match events_6[0] { + (match events_6[0] { Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => { - assert_eq!(*node_id, node_a.node.get_our_node_id()); channel_id = msg.channel_id.clone(); - let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap(); - node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap(); - as_announcement_sigs + assert_eq!(*node_id, node_b.node.get_our_node_id()); + (msg.clone(), announcement_sigs.clone().unwrap()) }, _ => panic!("Unexpected event"), + }, channel_id) + } + + fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) { + let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat); + let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx); + (msgs, chan_id, tx) + } + + fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) { + let bs_announcement_sigs = { + let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap(); + node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap(); + bs_announcement_sigs }; - let events_7 = node_a.node.get_and_clear_pending_events(); + let events_7 = node_b.node.get_and_clear_pending_events(); assert_eq!(events_7.len(), 1); - let (announcement, as_update) = match events_7[0] { + let (announcement, bs_update) = match events_7[0] { Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { (msg, update_msg) }, _ => panic!("Unexpected event"), }; - node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap(); - let events_8 = node_b.node.get_and_clear_pending_events(); + node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap(); + let events_8 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_8.len(), 1); - let bs_update = match events_8[0] { + let as_update = match events_8[0] { Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => { assert!(*announcement == *msg); update_msg @@ -2554,15 +3041,17 @@ mod tests { _ => panic!("Unexpected event"), }; - unsafe { - CHAN_COUNT += 1; - } + *node_a.network_chan_count.borrow_mut() += 1; - ((*announcement).clone(), 
(*as_update).clone(), (*bs_update).clone(), channel_id, tx) + ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone()) } fn create_announced_chan_between_nodes(nodes: &Vec, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { - let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]); + create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001) + } + + fn create_announced_chan_between_nodes_with_value(nodes: &Vec, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) { + let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat); for node in nodes { assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap()); node.router.handle_channel_update(&chan_announcement.1).unwrap(); @@ -2571,6 +3060,17 @@ mod tests { (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4) } + macro_rules! check_spends { + ($tx: expr, $spends_tx: expr) => { + { + let mut funding_tx_map = HashMap::new(); + let spends_tx = $spends_tx; + funding_tx_map.insert(spends_tx.txid(), spends_tx); + $tx.verify(&funding_tx_map).unwrap(); + } + } + } + fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) { let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) }; let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) }; @@ -2614,9 +3114,7 @@ mod tests { tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); } assert_eq!(tx_a, tx_b); - let mut funding_tx_map = HashMap::new(); - funding_tx_map.insert(funding_tx.txid(), funding_tx); - tx_a.verify(&funding_tx_map).unwrap(); + check_spends!(tx_a, funding_tx); let events_2 = node_a.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); @@ -2645,38 +3143,82 @@ mod tests { commitment_msg: msgs::CommitmentSigned, } impl SendEvent { + fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent { + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed } + } + fn from_event(event: Event) -> SendEvent { match event { - Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, commitment_signed } } => { - assert!(update_fulfill_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed } - }, + Event::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates), _ => panic!("Unexpected event type!"), } } } - static mut PAYMENT_COUNT: u8 = 0; + macro_rules! 
check_added_monitors { + ($node: expr, $count: expr) => { + { + let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), $count); + added_monitors.clear(); + } + } + } + + macro_rules! commitment_signed_dance { + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { + { + check_added_monitors!($node_a, 0); + let (as_revoke_and_ack, as_commitment_signed) = $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + check_added_monitors!($node_a, 1); + check_added_monitors!($node_b, 0); + assert!($node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!($node_b, 1); + let (bs_revoke_and_ack, bs_none) = $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); + assert!(bs_none.is_none()); + check_added_monitors!($node_b, 1); + if $fail_backwards { + assert!($node_a.node.get_and_clear_pending_events().is_empty()); + } + assert!($node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + { + let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); + if $fail_backwards { + assert_eq!(added_monitors.len(), 2); + assert!(added_monitors[0].0 != added_monitors[1].0); + } else { + assert_eq!(added_monitors.len(), 1); + } + added_monitors.clear(); + } + } + } + } + + macro_rules! get_payment_preimage_hash { + ($node: expr) => { + { + let payment_preimage = [*$node.network_payment_count.borrow(); 32]; + *$node.network_payment_count.borrow_mut() += 1; + let mut payment_hash = [0; 32]; + let mut sha = Sha256::new(); + sha.input(&payment_preimage[..]); + sha.result(&mut payment_hash); + (payment_preimage, payment_hash) + } + } + } + fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { - let our_payment_preimage = unsafe { [PAYMENT_COUNT; 32] }; - unsafe { PAYMENT_COUNT += 1 }; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node); let mut payment_event = { origin_node.node.send_payment(route, our_payment_hash).unwrap(); - { - let mut added_monitors = origin_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(origin_node, 1); let mut events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -2688,31 +3230,8 @@ mod tests { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - { - let added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 0); - } - - let revoke_and_ack = node.node.handle_commitment_signed(&prev_node.node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); - { - let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - assert!(prev_node.node.handle_revoke_and_ack(&node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none()); - let prev_revoke_and_ack = prev_node.node.handle_commitment_signed(&node.node.get_our_node_id(), 
&revoke_and_ack.1.unwrap()).unwrap(); - { - let mut added_monitors = prev_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 2); - added_monitors.clear(); - } - assert!(node.node.handle_revoke_and_ack(&prev_node.node.get_our_node_id(), &prev_revoke_and_ack.0).unwrap().is_none()); - assert!(prev_revoke_and_ack.1.is_none()); - { - let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(node, 0); + commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); let events_1 = node.node.get_and_clear_pending_events(); assert_eq!(events_1.len(), 1); @@ -2735,11 +3254,7 @@ mod tests { _ => panic!("Unexpected event"), } } else { - { - let mut added_monitors = node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -2750,88 +3265,73 @@ mod tests { (our_payment_preimage, our_payment_hash) } - fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) { + fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) { assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage)); - { - let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None; macro_rules! 
update_fulfill_dance { ($node: expr, $prev_node: expr, $last_node: expr) => { { $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - if $last_node { - assert_eq!(added_monitors.len(), 0); - } else { - assert_eq!(added_monitors.len(), 1); - } - added_monitors.clear(); - } - let revoke_and_commit = $node.node.handle_commitment_signed(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().1).unwrap(); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - assert!($prev_node.node.handle_revoke_and_ack(&$node.node.get_our_node_id(), &revoke_and_commit.0).unwrap().is_none()); - let revoke_and_ack = $prev_node.node.handle_commitment_signed(&$node.node.get_our_node_id(), &revoke_and_commit.1.unwrap()).unwrap(); - assert!(revoke_and_ack.1.is_none()); - { - let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 2); - added_monitors.clear(); - } - assert!($node.node.handle_revoke_and_ack(&$prev_node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none()); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); + if $last_node { + check_added_monitors!($node, 0); + } else { + check_added_monitors!($node, 1); } + commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false); } } } let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id(); let mut prev_node = expected_route.last().unwrap(); - for node in expected_route.iter().rev() { + for (idx, node) in expected_route.iter().rev().enumerate() { assert_eq!(expected_next_node, node.node.get_our_node_id()); if next_msgs.is_some() { update_fulfill_dance!(node, prev_node, false); } let events = node.node.get_and_clear_pending_events(); + if !skip_last || idx != expected_route.len() - 1 { + assert_eq!(events.len(), 1); + match events[0] { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + expected_next_node = node_id.clone(); + next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone())); + }, + _ => panic!("Unexpected event"), + } + } else { + assert!(events.is_empty()); + } + if !skip_last && idx == expected_route.len() - 1 { + assert_eq!(expected_next_node, origin_node.node.get_our_node_id()); + } + + prev_node = node; + } + + if !skip_last { + update_fulfill_dance!(origin_node, expected_route.first().unwrap(), true); + let events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => { - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - 
expected_next_node = node_id.clone(); - next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone())); + Event::PaymentSent { payment_preimage } => { + assert_eq!(payment_preimage, our_payment_preimage); }, _ => panic!("Unexpected event"), - }; - - prev_node = node; + } } + } - assert_eq!(expected_next_node, origin_node.node.get_our_node_id()); - update_fulfill_dance!(origin_node, expected_route.first().unwrap(), true); - - let events = origin_node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentSent { payment_preimage } => { - assert_eq!(payment_preimage, our_payment_preimage); - }, - _ => panic!("Unexpected event"), - } + fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) { + claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage); } const TEST_FINAL_CLTV: u32 = 32; @@ -2853,18 +3353,13 @@ mod tests { assert_eq!(hop.pubkey, node.node.get_our_node_id()); } - let our_payment_preimage = unsafe { [PAYMENT_COUNT; 32] }; - unsafe { PAYMENT_COUNT += 1 }; - let our_payment_hash = { - let mut sha = Sha256::new(); - sha.input(&our_payment_preimage[..]); - let mut ret = [0; 32]; - sha.result(&mut ret); - ret - }; + let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node); let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap(); - assert_eq!(err.err, "Cannot send value that would put us over our max HTLC value in flight"); + match err { + APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"), + _ => panic!("Unknown error variants"), + }; } fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) { @@ -2872,99 +3367,84 @@ mod tests { claim_payment(&origin, expected_route, our_payment_preimage); } - fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) { + fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) { assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); - { - let mut added_monitors = expected_route.last().unwrap().chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!(expected_route.last().unwrap(), 1); let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None; macro_rules! 
update_fail_dance { ($node: expr, $prev_node: expr, $last_node: expr) => { { $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap(); - let revoke_and_commit = $node.node.handle_commitment_signed(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().1).unwrap(); - - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - assert!($prev_node.node.handle_revoke_and_ack(&$node.node.get_our_node_id(), &revoke_and_commit.0).unwrap().is_none()); - { - let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - let revoke_and_ack = $prev_node.node.handle_commitment_signed(&$node.node.get_our_node_id(), &revoke_and_commit.1.unwrap()).unwrap(); - { - let mut added_monitors = $prev_node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } - assert!(revoke_and_ack.1.is_none()); - assert!($node.node.get_and_clear_pending_events().is_empty()); - assert!($node.node.handle_revoke_and_ack(&$prev_node.node.get_our_node_id(), &revoke_and_ack.0).unwrap().is_none()); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - if $last_node { - assert_eq!(added_monitors.len(), 1); - } else { - assert_eq!(added_monitors.len(), 2); - assert!(added_monitors[0].0 != added_monitors[1].0); - } - added_monitors.clear(); - } - } - } - } + commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node); + } + } + } let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id(); let mut prev_node = expected_route.last().unwrap(); - for node in expected_route.iter().rev() { + for (idx, node) in expected_route.iter().rev().enumerate() { assert_eq!(expected_next_node, node.node.get_our_node_id()); if next_msgs.is_some() { - update_fail_dance!(node, prev_node, false); + // We may be the "last node" for the purpose of the commitment dance if we're + // skipping the last node (implying it is disconnected) and we're the + // second-to-last node! 
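+ // Given the rev() iteration above, idx == expected_route.len() - 1 is the hop adjacent to the + // origin, so when skip_last is set that hop's dance is the final one and gets last_node = true.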
+ update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
+ }
 let events = node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => {
- assert!(update_add_htlcs.is_empty());
- assert!(update_fulfill_htlcs.is_empty());
- assert_eq!(update_fail_htlcs.len(), 1);
- assert!(update_fail_malformed_htlcs.is_empty());
- expected_next_node = node_id.clone();
- next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
- },
- _ => panic!("Unexpected event"),
- };
+ if !skip_last || idx != expected_route.len() - 1 {
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert_eq!(update_fail_htlcs.len(), 1);
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+ expected_next_node = node_id.clone();
+ next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else {
+ assert!(events.is_empty());
+ }
+ if !skip_last && idx == expected_route.len() - 1 {
+ assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+ }
 prev_node = node;
 }
- assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
- update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
+ if !skip_last {
+ update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
- let events = origin_node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentFailed { payment_hash } => {
- assert_eq!(payment_hash, our_payment_hash);
- },
- _ => panic!("Unexpected event"),
+ let events = origin_node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, rejected_by_dest } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ assert!(rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
 }
 }
+ fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) {
+ fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
+ }
+
 fn create_network(node_count: usize) -> Vec<Node> {
 let mut nodes = Vec::new();
 let mut rng = thread_rng();
 let secp_ctx = Secp256k1::new();
 let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+ let chan_count = Rc::new(RefCell::new(0));
+ let payment_count = Rc::new(RefCell::new(0));
+
 for _ in 0..node_count {
 let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
 let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
@@ -2977,12 +3457,537 @@ mod tests {
 };
 let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap();
 let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id), chain_monitor.clone(), Arc::clone(&logger));
- nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router });
+ nodes.push(Node {
chain_monitor, tx_broadcaster, chan_monitor, node, router, + network_payment_count: payment_count.clone(), + network_chan_count: chan_count.clone(), + }); } nodes } + #[test] + fn test_async_inbound_update_fee() { + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // A B + // update_fee -> + // send (1) commitment_signed -. + // <- update_add_htlc/commitment_signed + // send (2) RAA (awaiting remote revoke) -. + // (1) commitment_signed is delivered -> + // .- send (3) RAA (awaiting remote revoke) + // (2) RAA is delivered -> + // .- send (4) commitment_signed + // <- (3) RAA is delivered + // send (5) commitment_signed -. + // <- (4) commitment_signed is delivered + // send (6) RAA -. + // (5) commitment_signed is delivered -> + // <- RAA + // (6) RAA is delivered -> + + // First nodes[0] generates an update_fee + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + check_added_monitors!(nodes[0], 1); + + // deliver(1), generate (3): + let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting nodes[0] revoke_and_ack + check_added_monitors!(nodes[1], 1); + + let bs_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2) + assert!(bs_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (4) + assert!(bs_update.as_ref().unwrap().update_fee.is_none()); // (4) + check_added_monitors!(nodes[1], 1); + + let as_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); // deliver (3) + assert!(as_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (5) + assert!(as_update.as_ref().unwrap().update_fee.is_none()); // (5) + check_added_monitors!(nodes[0], 1); + + let (as_second_revoke, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.unwrap().commitment_signed).unwrap(); // deliver (4) + assert!(as_second_commitment_signed.is_none()); // only (6) + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke, bs_second_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.unwrap().commitment_signed).unwrap(); // deliver (5) + assert!(bs_second_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + let events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), 
&as_second_revoke).unwrap().is_none()); // deliver (6) + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_unordered_raa() { + // Just the intro to the previous test followed by an out-of-order RAA (which caused a + // crash in an earlier version of the update_fee patch) + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // First nodes[0] generates an update_fee + nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let update_msg = match events_0[0] { // (1) + Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + update_fee.as_ref() + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... + let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2) + assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack + check_added_monitors!(nodes[0], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); // deliver (2) + check_added_monitors!(nodes[1], 1); + + // We can't continue, sadly, because our (1) now has a bogus signature + } + + #[test] + fn test_multi_flight_update_fee() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // A B + // update_fee/commitment_signed -> + // .- send (1) RAA and (2) commitment_signed + // update_fee (never committed) -> + // (3) update_fee -> + // We have to manually generate the above update_fee, it is allowed by the protocol but we + // don't track which updates correspond to which revoke_and_ack responses so we're in + // AwaitingRAA mode and will not generate the update_fee yet. + // <- (1) RAA delivered + // (3) is generated and send (4) CS -. 
+ // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
+ // know the per_commitment_point to use for it.
+ // <- (2) commitment_signed delivered
+ // revoke_and_ack ->
+ // B should send no response here
+ // (4) commitment_signed delivered ->
+ // <- RAA/commitment_signed delivered
+ // revoke_and_ack ->
+
+ // First nodes[0] generates an update_fee
+ let initial_feerate = get_feerate!(nodes[0]);
+ nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
+ Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+ (update_fee.as_ref().unwrap(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
+ let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
+ // transaction:
+ nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+ // Create the (3) update_fee message that nodes[0] will generate before it does...
+ let mut update_msg_2 = msgs::UpdateFee {
+ channel_id: update_msg_1.channel_id.clone(),
+ feerate_per_kw: (initial_feerate + 30) as u32,
+ };
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
+
+ update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
+ // Deliver (3)
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
+
+ // Deliver (1), generating (3) and (4)
+ let as_second_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ assert!(as_second_update.as_ref().unwrap().update_add_htlcs.is_empty());
+ assert!(as_second_update.as_ref().unwrap().update_fulfill_htlcs.is_empty());
+ assert!(as_second_update.as_ref().unwrap().update_fail_htlcs.is_empty());
+ assert!(as_second_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+ // Check that the update_fee newly generated matches what we delivered:
+ assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
+ assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
+
+ // Deliver (2) commitment_signed
+ let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), bs_commitment_signed.as_ref().unwrap()).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ assert!(as_commitment_signed.is_none());
+
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ // Deliver (4)
+ let (bs_second_revoke, bs_second_commitment) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.unwrap().commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
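// The two handle_update_fee calls just above lean on update_fee being
// overwrite-only until committed: a peer may send several update_fee messages
// back-to-back, and only the last one received before the next
// commitment_signed takes effect. A minimal sketch of that rule, assuming
// hypothetical helper and field names rather than Channel's actual internals:
fn apply_update_fee(pending_feerate_per_kw: &mut Option<u32>, msg_feerate_per_kw: u32) {
    // Each update_fee simply replaces any uncommitted predecessor.
    *pending_feerate_per_kw = Some(msg_feerate_per_kw);
}
fn commit_pending_feerate(pending_feerate_per_kw: &mut Option<u32>, committed_feerate_per_kw: &mut u32) {
    // On commitment_signed, the last pending feerate becomes the feerate of
    // the new commitment transaction.
    if let Some(feerate) = pending_feerate_per_kw.take() {
        *committed_feerate_per_kw = feerate;
    }
}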
assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + let (as_second_revoke, as_second_commitment) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment.unwrap()).unwrap(); + assert!(as_second_commitment.is_none()); + check_added_monitors!(nodes[0], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_vanilla() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed = commitment_signed.unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[1], 1); + } + + #[test] + fn test_update_fee_with_fundee_update_add_htlc() { + let mut nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! 
get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + check_added_monitors!(nodes[0], 1); + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed = commitment_signed.unwrap(); + check_added_monitors!(nodes[1], 1); + + let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap(); + + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]); + + // nothing happens since node[1] is in AwaitingRemoteRevoke + nodes[1].node.send_payment(route, our_payment_hash).unwrap(); + { + let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 0); + added_monitors.clear(); + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 0); + // node[1] has nothing to do + + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + assert!(commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap(); + // AwaitingRemoteRevoke ends here + + let commitment_update = resp_option.unwrap(); + assert_eq!(commitment_update.update_add_htlcs.len(), 1); + assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); + assert_eq!(commitment_update.update_fee.is_none(), true); + + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap(); + let (revoke, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + let commitment_signed = commitment_signed.unwrap(); + let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(resp_option.is_none()); + + let (revoke, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(commitment_signed.is_none()); + let 
resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap(); + check_added_monitors!(nodes[0], 1); + assert!(resp_option.is_none()); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[0].node.process_pending_htlc_forwards(); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { .. } => { }, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); + + send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + } + + #[test] + fn test_update_fee() { + let nodes = create_network(2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + macro_rules! get_feerate { + ($node: expr) => {{ + let chan_lock = $node.node.channel_state.lock().unwrap(); + let chan = chan_lock.by_id.get(&channel_id).unwrap(); + chan.get_feerate() + }} + } + + // A B + // (1) update_fee/commitment_signed -> + // <- (2) revoke_and_ack + // .- send (3) commitment_signed + // (4) update_fee/commitment_signed -> + // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) + // <- (3) commitment_signed delivered + // send (6) revoke_and_ack -. + // <- (5) deliver revoke_and_ack + // (6) deliver revoke_and_ack -> + // .- send (7) commitment_signed in response to (4) + // <- (7) deliver commitment_signed + // revoke_and_ack -> + + // Create and deliver (1)... + let feerate = get_feerate!(nodes[0]); + nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + + let events_0 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap(); + + // Generate (2) and (3): + let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap(); + let commitment_signed_0 = commitment_signed.unwrap(); + check_added_monitors!(nodes[0], 1); + check_added_monitors!(nodes[1], 1); + + // Deliver (2): + let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap(); + assert!(resp_option.is_none()); + check_added_monitors!(nodes[0], 1); + + // Create and deliver (4)... 
+ nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
+ let events_0 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg, commitment_signed) = match events_0[0] {
+ Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
+ (update_fee.as_ref(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+
+ let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ // ... creating (5)
+ assert!(commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+ check_added_monitors!(nodes[1], 1);
+
+ // Handle (3), creating (6):
+ let (revoke_msg_0, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
+ assert!(commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ // Deliver (5):
+ let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(resp_option.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ // Deliver (6), creating (7):
+ let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
+ let commitment_signed = resp_option.unwrap().commitment_signed;
+ check_added_monitors!(nodes[1], 1);
+
+ // Deliver (7)
+ let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ assert!(commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+ let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(resp_option.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ assert_eq!(get_feerate!(nodes[0]), feerate + 30);
+ assert_eq!(get_feerate!(nodes[1]), feerate + 30);
+ close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+ }
+
 #[test]
 fn fake_network_test() {
 // Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -3093,57 +4098,101 @@ mod tests {
 close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
 close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
 close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+ }
- // Check that we processed all pending events
- for node in nodes {
- assert_eq!(node.node.get_and_clear_pending_events().len(), 0);
- assert_eq!(node.chan_monitor.added_monitors.lock().unwrap().len(), 0);
- }
+ #[test]
+ fn duplicate_htlc_test() {
+ // Test that we accept duplicate payment_hash HTLCs across the network and that
+ // claiming/failing them are all separate and don't affect each other
+ let mut nodes = create_network(6);
+
+ // Create some initial channels to route via 3 to 4/5 from 0/1/2
+ create_announced_chan_between_nodes(&nodes, 0, 3);
+ create_announced_chan_between_nodes(&nodes, 1, 3);
+ create_announced_chan_between_nodes(&nodes, 2, 3);
+ create_announced_chan_between_nodes(&nodes, 3, 4);
+ create_announced_chan_between_nodes(&nodes, 3, 5);
+
+ let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
+
+ *nodes[0].network_payment_count.borrow_mut() -= 1;
+ assert_eq!(route_payment(&nodes[1],
&vec!(&nodes[3])[..], 1000000).0, payment_preimage);
+
+ *nodes[0].network_payment_count.borrow_mut() -= 1;
+ assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
+
+ claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
+ fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
+ claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
 }
 #[derive(PartialEq)]
 enum HTLCType { NONE, TIMEOUT, SUCCESS }
+ /// Tests that the given node has broadcast transactions for the given Channel
+ ///
+ /// First checks that the latest local commitment tx has been broadcast, unless an explicit
+ /// commitment_tx is provided, which may be used to test that a remote commitment tx was
+ /// broadcast and the revoked outputs were claimed.
+ ///
+ /// Next tests that there is (or is not) a transaction that spends the commitment transaction
+ /// that appears to be the type of HTLC transaction specified in has_htlc_tx.
+ ///
+ /// All broadcast transactions must be accounted for in one of the above three types or we'll
+ /// also fail.
 fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
 let mut res = Vec::with_capacity(2);
-
- if let Some(explicit_tx) = commitment_tx {
- res.push(explicit_tx.clone());
- } else {
- for tx in node_txn.iter() {
- if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
- let mut funding_tx_map = HashMap::new();
- funding_tx_map.insert(chan.3.txid(), chan.3.clone());
- tx.verify(&funding_tx_map).unwrap();
+ node_txn.retain(|tx| {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
+ check_spends!(tx, chan.3.clone());
+ if commitment_tx.is_none() {
 res.push(tx.clone());
- }
+ }
+ false
+ } else { true }
+ });
+ if let Some(explicit_tx) = commitment_tx {
+ res.push(explicit_tx.clone());
 }
+
+ assert_eq!(res.len(), 1);
 if has_htlc_tx != HTLCType::NONE {
- for tx in node_txn.iter() {
+ node_txn.retain(|tx| {
 if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
- let mut funding_tx_map = HashMap::new();
- funding_tx_map.insert(res[0].txid(), res[0].clone());
- tx.verify(&funding_tx_map).unwrap();
+ check_spends!(tx, res[0].clone());
 if has_htlc_tx == HTLCType::TIMEOUT {
 assert!(tx.lock_time != 0);
 } else {
 assert!(tx.lock_time == 0);
 }
 res.push(tx.clone());
- break;
- }
- }
+ false
+ } else { true }
+ });
 assert_eq!(res.len(), 2);
 }
- node_txn.clear();
+
+ assert!(node_txn.is_empty());
 res
 }
+ /// Tests that the given node has broadcast a claim transaction against the provided revoked
+ /// HTLC transaction.
+ fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
+ let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ node_txn.retain(|tx| {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
+ check_spends!(tx, revoked_tx.clone());
+ false
+ } else { true }
+ });
+ assert!(node_txn.is_empty());
+ }
+
 fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -3153,10 +4202,7 @@ mod tests {
 for tx in prev_txn {
 if node_txn[0].input[0].previous_output.txid == tx.txid() {
- let mut funding_tx_map = HashMap::new();
- funding_tx_map.insert(tx.txid(), tx.clone());
- node_txn[0].verify(&funding_tx_map).unwrap();
-
+ check_spends!(node_txn[0], tx.clone());
 assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
 assert_eq!(tx.input.len(), 1); // must spend a commitment tx
@@ -3196,6 +4242,281 @@ mod tests {
 }
 }
+ macro_rules! expect_pending_htlcs_forwardable {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+ $node.node.process_pending_htlc_forwards();
+ }}
+ }
+
+ #[test]
+ fn channel_reserve_test() {
+ use util::rng;
+ use std::sync::atomic::Ordering;
+ use ln::msgs::HandleError;
+
+ macro_rules! get_channel_value_stat {
+ ($node: expr, $channel_id: expr) => {{
+ let chan_lock = $node.node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+ chan.get_value_stat()
+ }}
+ }
+
+ let mut nodes = create_network(3);
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
+
+ let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
+ let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
+
+ let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
+ let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
+
+ macro_rules! get_route_and_payment_hash {
+ ($recv_value: expr) => {{
+ let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ (route, payment_hash, payment_preimage)
+ }}
+ };
+
+ macro_rules! expect_forward {
+ ($node: expr) => {{
+ let mut events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ check_added_monitors!($node, 1);
+ let payment_event = SendEvent::from_event(events.remove(0));
+ payment_event
+ }}
+ }
+
+ macro_rules! expect_payment_received {
+ ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!($expected_payment_hash, *payment_hash);
+ assert_eq!($expected_recv_value, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ };
+
+ let feemsat = 239; // somehow we know?
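// A quick restatement of the fee arithmetic this test relies on, as a sketch
// (assumption: the flat 239 msat per-hop fee above, and the route used below
// being nodes[0] -> nodes[1] -> nodes[2], i.e. exactly one intermediate hop):
let sketch_per_hop_fee_msat: u64 = 239; // mirrors feemsat above
let sketch_total_fee_msat = (3u64 - 2) * sketch_per_hop_fee_msat; // nodes.len() - 2 fee-charging hops
debug_assert_eq!(sketch_total_fee_msat, 239); // matches total_fee_msat computed next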
+ let total_fee_msat = (nodes.len() - 2) as u64 * 239; + + let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat; + + // attempt to send amt_msat > their_max_htlc_value_in_flight_msat + { + let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1); + assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); + let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap(); + match err { + APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"), + _ => panic!("Unknown error variants"), + } + } + + let mut htlc_id = 0; + // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete + // nodes[0]'s wealth + loop { + let amt_msat = recv_value_0 + total_fee_msat; + if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat { + break; + } + send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0); + htlc_id += 1; + + let (stat01_, stat11_, stat12_, stat22_) = ( + get_channel_value_stat!(nodes[0], chan_1.2), + get_channel_value_stat!(nodes[1], chan_1.2), + get_channel_value_stat!(nodes[1], chan_2.2), + get_channel_value_stat!(nodes[2], chan_2.2), + ); + + assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); + assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat); + assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat)); + assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat)); + stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_; + } + + { + let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat; + // attempt to get channel_reserve violation + let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1); + let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap(); + match err { + APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"), + _ => panic!("Unknown error variants"), + } + } + + // adding pending output + let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2; + let amt_msat_1 = recv_value_1 + total_fee_msat; + + let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1); + let payment_event_1 = { + nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap(); + + // channel reserve test with htlc pending output > 0 + let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat; + { + let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1); + match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() { + APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"), + _ => panic!("Unknown error variants"), + } + } + + { + // test channel_reserve test on nodes[1] side + let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1); + + // Need to manually create update_add_htlc message to go around 
the channel reserve check in send_htlc()
+ let secp_ctx = Secp256k1::new();
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+ let msg = msgs::UpdateAddHTLC {
+ channel_id: chan_1.2,
+ htlc_id,
+ amount_msat: htlc_msat,
+ payment_hash: our_payment_hash,
+ cltv_expiry: htlc_cltv,
+ onion_routing_packet: onion_packet,
+ };
+
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+ match err {
+ HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+ }
+ }
+
+ // split the rest to test holding cell
+ let recv_value_21 = recv_value_2/2;
+ let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
+ {
+ let stat = get_channel_value_stat!(nodes[0], chan_1.2);
+ assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
+ }
+
+ // now see if they go through on both sides
+ let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
+ // but this one will get stuck in the holding cell
+ nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // test with outbound holding cell amount > 0
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
+ // this one will also get stuck in the holding cell
+ nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // flush the pending htlc
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+ assert!(bs_none.is_none());
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_11 = expect_forward!(nodes[1]);
+ 
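// Before delivering payment_event_11 below, a note on the UpdateAddHTLC built
// by hand earlier in this test: build_onion_payloads effectively walks the
// route backwards, growing the amount and cltv_expiry by each intermediate
// hop's fee and delta. A minimal sketch of that accumulation, with
// hypothetical (fee_msat, cltv_expiry_delta) pairs standing in for RouteHop:
fn accumulate_route(final_amt_msat: u64, final_cltv: u32, intermediate_hops: &[(u64, u32)]) -> (u64, u32) {
    let mut amt_msat = final_amt_msat;
    let mut cltv = final_cltv;
    for &(fee_msat, cltv_expiry_delta) in intermediate_hops.iter().rev() {
        amt_msat += fee_msat; // each earlier hop forwards the later hops' value plus their fee
        cltv += cltv_expiry_delta; // and must see a correspondingly larger expiry
    }
    (amt_msat, cltv) // what the first hop's update_add_htlc must carry
}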
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[2]); + expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1); + + // flush the htlcs in the holding cell + assert_eq!(commitment_update_2.update_add_htlcs.len(), 2); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap(); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[1]); + + let ref payment_event_3 = expect_forward!(nodes[1]); + assert_eq!(payment_event_3.msgs.len(), 2); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap(); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap(); + + commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[2]); + + let events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(our_payment_hash_21, *payment_hash); + assert_eq!(recv_value_21, amt); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(our_payment_hash_22, *payment_hash); + assert_eq!(recv_value_22, amt); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1); + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21); + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22); + + let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat); + let stat0 = get_channel_value_stat!(nodes[0], chan_1.2); + assert_eq!(stat0.value_to_self_msat, expected_value_to_self); + assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat); + + let stat2 = get_channel_value_stat!(nodes[2], chan_2.2); + assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22); + } + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -3245,11 +4566,7 @@ mod tests { ($node: expr, $prev_node: expr, $preimage: expr) => { { assert!($node.node.claim_funds($preimage)); - { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 1); - added_monitors.clear(); - } + check_added_monitors!($node, 1); let events = $node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -3283,25 +4600,34 @@ mod tests { assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); + { // Cheat and reset nodes[4]'s height to 1 + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1); + } + + 
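// The block-connection loops below drive each node toward one of two
// deadlines. A rough model, assuming the semantics implied by the
// CLTV_CLAIM_BUFFER and HTLC_FAIL_TIMEOUT_BLOCKS imports (a sketch, not
// ChannelMonitor's actual code):
fn should_broadcast_timeout(height: u32, htlc_cltv_expiry: u32) -> bool {
    // An offered HTLC can be timed out on-chain once its expiry is reached.
    height >= htlc_cltv_expiry
}
fn should_broadcast_claim(height: u32, htlc_cltv_expiry: u32, claim_buffer: u32) -> bool {
    // A received HTLC with a known preimage must be claimed a safety buffer
    // of blocks before the counterparty could start timing it out.
    height + claim_buffer >= htlc_cltv_expiry
}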
assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1); + assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1); // One pending HTLC to time out: let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0; + // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for + // buffer space). { let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[3].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]); - for i in 2..TEST_FINAL_CLTV - 3 { + nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]); + for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 { header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]); } let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT); - // Claim the payment on nodes[3], giving it knowledge of the preimage + // Claim the payment on nodes[4], giving it knowledge of the preimage claim_funds!(nodes[4], nodes[3], payment_preimage_2); header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[4].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]); - for i in 2..TEST_FINAL_CLTV - 3 { + nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]); + for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 { header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]); } @@ -3324,6 +4650,13 @@ mod tests { let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; // Get the will-be-revoked local txn from nodes[0] let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone(); + assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid()); + assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present + assert_eq!(revoked_local_txn[1].input.len(), 1); + assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid()); + assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout // Revoke the old state claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); @@ -3332,12 +4665,11 @@ mod tests { nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); - assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected + assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output - let mut funding_tx_map = HashMap::new(); - 
funding_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
- node_txn[0].verify(&funding_tx_map).unwrap();
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
 node_txn.swap_remove(0);
 }
 test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
@@ -3346,19 +4678,316 @@ mod tests {
 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
-
- //TODO: At this point nodes[1] should claim the revoked HTLC-Timeout output, but that's
- //not yet implemented in ChannelMonitor
+ test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
 }
 get_announce_close_broadcast_events(&nodes, 0, 1);
 assert_eq!(nodes[0].node.list_channels().len(), 0);
 assert_eq!(nodes[1].node.list_channels().len(), 0);
+ }
- // Check that we processed all pending events
- for node in nodes {
- assert_eq!(node.node.get_and_clear_pending_events().len(), 0);
- assert_eq!(node.chan_monitor.added_monitors.lock().unwrap().len(), 0);
+ #[test]
+ fn revoked_output_claim() {
+ // Simple test to ensure a node will claim a revoked output when a stale remote commitment
+ // transaction is broadcast by its counterparty
+ let nodes = create_network(2);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ // node[0] is going to revoke an old state thus node[1] should be able to claim the revoked output
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 1);
+ // Only output is the full channel value back to nodes[0]:
+ assert_eq!(revoked_local_txn[0].output.len(), 1);
+ // Send a payment through, updating everyone's latest commitment txn
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
+
+ // Inform nodes[1] that nodes[0] broadcast a stale tx
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast justice tx twice, and its own local state once
+
+ assert_eq!(node_txn[0], node_txn[2]);
+
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ check_spends!(node_txn[1], chan_1.3.clone());
+
+ // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ }
+
+ #[test]
+ fn claim_htlc_outputs_shared_tx() {
+ // Node revoked old state, HTLCs haven't timed out yet, claim them in a shared justice tx
+ let nodes = create_network(2);
+
+ // Create some new channel:
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Rebalance the network to generate HTLCs in both directions
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ // node[0] is going to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx
+ let
payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
+
+ // Get the will-be-revoked local txn from node[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+ assert_eq!(revoked_local_txn[1].input.len(), 1);
+ assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+ assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
+ check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
+
+ //Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+ {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 4);
+
+ assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+
+ assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
+
+ let mut witness_lens = BTreeSet::new();
+ witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
+ assert_eq!(witness_lens.len(), 3);
+ assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+ assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
+ assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
+
+ // Next nodes[1] broadcasts its current local tx state:
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); //Spending funding tx's unique txout, tx broadcasted by ChannelManager
+
+ assert_eq!(node_txn[2].input.len(), 1);
+ let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
+ assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
+ assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
+ assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+ assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ }
+
+ #[test]
+ fn claim_htlc_outputs_single_tx() {
+ // Node revoked old state, HTLCs have timed out, claim each of them in separate justice txs
+ let nodes = create_network(2);
+
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Rebalance the network to generate HTLCs in both directions
+ send_payment(&nodes[0],
&vec!(&nodes[1])[..], 8000000);
+ // node[0] is going to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx, but this
+ // time as two different claim transactions as we're going to time out the htlcs given a high current height
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
+
+ // Get the will-be-revoked local txn from node[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+
+ //Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+ {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 12); // ChannelManager : 2, ChannelMonitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan)
+
+ assert_eq!(node_txn[0], node_txn[7]);
+ assert_eq!(node_txn[1], node_txn[8]);
+ assert_eq!(node_txn[2], node_txn[9]);
+ assert_eq!(node_txn[3], node_txn[10]);
+ assert_eq!(node_txn[4], node_txn[11]);
+ assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcasted by ChannelManager
+ assert_eq!(node_txn[4], node_txn[6]);
+
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[2].input.len(), 1);
+
+ let mut revoked_tx_map = HashMap::new();
+ revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
+ node_txn[0].verify(&revoked_tx_map).unwrap();
+ node_txn[1].verify(&revoked_tx_map).unwrap();
+ node_txn[2].verify(&revoked_tx_map).unwrap();
+
+ let mut witness_lens = BTreeSet::new();
+ witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
+ assert_eq!(witness_lens.len(), 3);
+ assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+ assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
+ assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
+
+ assert_eq!(node_txn[3].input.len(), 1);
+ check_spends!(node_txn[3], chan_1.3.clone());
+
+ assert_eq!(node_txn[4].input.len(), 1);
+ let witness_script = node_txn[4].input[0].witness.last().unwrap();
+ assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
+ assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
+ assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+ assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ }
+
+ #[test]
+ fn test_htlc_ignore_latest_remote_commitment() {
+ // Test that
+
+	#[test]
+	fn test_htlc_ignore_latest_remote_commitment() {
+		// Test that HTLC transactions spending the latest remote commitment transaction are simply
+		// ignored if we cannot claim them. This originally tickled an invalid unwrap().
+		let nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		route_payment(&nodes[0], &[&nodes[1]], 10000000);
+		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
+		{
+			let events = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events.len(), 1);
+			match events[0] {
+				Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+					assert_eq!(flags & 0b10, 0b10);
+				},
+				_ => panic!("Unexpected event"),
+			}
+		}
+
+		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 2);
+
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
+
+		{
+			let events = nodes[1].node.get_and_clear_pending_events();
+			assert_eq!(events.len(), 1);
+			match events[0] {
+				Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+					assert_eq!(flags & 0b10, 0b10);
+				},
+				_ => panic!("Unexpected event"),
+			}
+		}
+
+		// Duplicate the block_connected call since this may happen due to other listeners
+		// registering new transactions
+		nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
+	}
+
+	#[test]
+	fn test_force_close_fail_back() {
+		// Check which HTLCs are failed-backwards on channel force-closure
+		let mut nodes = create_network(3);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
+
+		let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+		let mut payment_event = {
+			nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+			check_added_monitors!(nodes[0], 1);
+
+			let mut events = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events.len(), 1);
+			SendEvent::from_event(events.remove(0))
+		};
+
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+		let events_1 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_1.len(), 1);
+		match events_1[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+
+		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[1].node.process_pending_htlc_forwards();
+
+		let mut events_2 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_2.len(), 1);
+		payment_event = SendEvent::from_event(events_2.remove(0));
+		assert_eq!(payment_event.msgs.len(), 1);
+
+		check_added_monitors!(nodes[1], 1);
+		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+		check_added_monitors!(nodes[2], 1);
+
+		// nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
+		// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
+		// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
+
+		nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
+		let events_3 = nodes[2].node.get_and_clear_pending_events();
+		assert_eq!(events_3.len(), 1);
+		match events_3[0] {
+			Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+				assert_eq!(flags & 0b10, 0b10);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		let tx = {
+			let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			// Note that we don't bother broadcasting the HTLC-Success transaction here as we
+			// don't have a use for it unless nodes[2] learns the preimage somehow; otherwise
+			// the funds will go back to nodes[1] upon timeout.
+			assert_eq!(node_txn.len(), 1);
+			node_txn.remove(0)
+		};
+
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
+
+		let events_4 = nodes[1].node.get_and_clear_pending_events();
+		// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
+		assert_eq!(events_4.len(), 1);
+		match events_4[0] {
+			Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+				assert_eq!(flags & 0b10, 0b10);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
+		{
+			let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
+			monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
+				.provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
+		}
+		nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
+		let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 1);
+		assert_eq!(node_txn[0].input.len(), 1);
+		assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
+		assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
+		assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+
+		check_spends!(node_txn[0], tx);
+	}
 
 	#[test]
@@ -3382,11 +5011,1016 @@ mod tests {
 		while !headers.is_empty() {
 			nodes[0].node.block_disconnected(&headers.pop().unwrap());
 		}
+		{
+			let events = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events.len(), 1);
+			match events[0] {
+				Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+					assert_eq!(flags & 0b10, 0b10);
+				},
+				_ => panic!("Unexpected event"),
+			}
+		}
 		let channel_state = nodes[0].node.channel_state.lock().unwrap();
 		assert_eq!(channel_state.by_id.len(), 0);
 		assert_eq!(channel_state.short_to_id.len(), 0);
 	}
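+
+	/// Illustrative sketch only, not used by the tests: test_force_close_fail_back above
+	/// recognizes an HTLC-Success transaction by its lock_time of 0 together with its
+	/// 5-element witness; HTLC-Timeout transactions instead carry a nonzero CLTV
+	/// lock_time. This helper just restates that heuristic, and the exact witness layout
+	/// is an assumption about the current commitment transaction format.
+	#[allow(dead_code)]
+	fn looks_like_htlc_success(tx: &Transaction) -> bool {
+		tx.lock_time == 0 && tx.input.len() == 1 && tx.input[0].witness.len() == 5
+	}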
+
+	/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
+	/// for claims/fails they are separated out.
+	fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
+		let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id());
+		let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id());
+
+		let mut resp_1 = Vec::new();
+		for msg in reestablish_1 {
+			resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
+		}
+		if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+			check_added_monitors!(node_b, 1);
+		} else {
+			check_added_monitors!(node_b, 0);
+		}
+
+		let mut resp_2 = Vec::new();
+		for msg in reestablish_2 {
+			resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
+		}
+		if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+			check_added_monitors!(node_a, 1);
+		} else {
+			check_added_monitors!(node_a, 0);
+		}
+
+		// We don't yet support both sides needing updates, as that would require a different commitment dance:
+		assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
+		        (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
+
+		for chan_msgs in resp_1.drain(..) {
+			if pre_all_htlcs {
+				let a = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
+				let _announcement_sigs_opt = a.unwrap();
+				//TODO: Test announcement_sigs re-sending when we've implemented it
+			} else {
+				assert!(chan_msgs.0.is_none());
+			}
+			if pending_raa.0 {
+				assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
+				assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+				check_added_monitors!(node_a, 1);
+			} else {
+				assert!(chan_msgs.1.is_none());
+			}
+			if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+				let commitment_update = chan_msgs.2.unwrap();
+				if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+					assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
+				} else {
+					assert!(commitment_update.update_add_htlcs.is_empty());
+				}
+				assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
+				assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
+				assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+				for update_add in commitment_update.update_add_htlcs {
+					node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
+				}
+				for update_fulfill in commitment_update.update_fulfill_htlcs {
+					node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
+				}
+				for update_fail in commitment_update.update_fail_htlcs {
+					node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
+				}
+
+				if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+					commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+				} else {
+					let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+					check_added_monitors!(node_a, 1);
+					assert!(as_commitment_signed.is_none());
+					assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+					check_added_monitors!(node_b, 1);
+				}
+			} else {
+				assert!(chan_msgs.2.is_none());
+			}
+		}
+
+		for chan_msgs in resp_2.drain(..) {
+			if pre_all_htlcs {
+				let _announcement_sigs_opt = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+				//TODO: Test announcement_sigs re-sending when we've implemented it
+			} else {
+				assert!(chan_msgs.0.is_none());
+			}
+			if pending_raa.1 {
+				assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
+				assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+				check_added_monitors!(node_b, 1);
+			} else {
+				assert!(chan_msgs.1.is_none());
+			}
+			if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+				let commitment_update = chan_msgs.2.unwrap();
+				if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+					assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
+				} else {
+					assert!(commitment_update.update_add_htlcs.is_empty());
+				}
+				assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
+				assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
+				assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+				for update_add in commitment_update.update_add_htlcs {
+					node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
+				}
+				for update_fulfill in commitment_update.update_fulfill_htlcs {
+					node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
+				}
+				for update_fail in commitment_update.update_fail_htlcs {
+					node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
+				}
+
+				if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+					commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+				} else {
+					let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+					check_added_monitors!(node_b, 1);
+					assert!(bs_commitment_signed.is_none());
+					assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+					check_added_monitors!(node_a, 1);
+				}
+			} else {
+				assert!(chan_msgs.2.is_none());
+			}
+		}
+	}
+
+	#[test]
+	fn test_simple_peer_disconnect() {
+		// Test that we can reconnect when there are no lost messages
+		let nodes = create_network(3);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+		let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+		let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+		fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
+		claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
+
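+		// reconnect_nodes argument order, for the calls below: pre_all_htlcs, then
+		// (node_a, node_b) tuples for pending_htlc_adds, pending_htlc_claims,
+		// pending_cell_htlc_claims and pending_cell_htlc_fails, then pending_raa.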
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; + let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3); + fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5); + + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false)); + { + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentSent { payment_preimage } => { + assert_eq!(payment_preimage, payment_preimage_3); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentFailed { payment_hash, rejected_by_dest } => { + assert_eq!(payment_hash, payment_hash_5); + assert!(rejected_by_dest); + }, + _ => panic!("Unexpected event"), + } + } + + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); + fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); + } + + fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { + // Test that we can reconnect when in-flight HTLC updates get dropped + let mut nodes = create_network(2); + if messages_delivered == 0 { + create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); + // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect) + } else { + create_announced_chan_between_nodes(&nodes, 0, 1); + } + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + let payment_event = { + nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); + + if messages_delivered < 2 { + // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! 
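+			// messages_delivered counts how far the five-message exchange
+			// (update_add_htlc, commitment_signed, revoke_and_ack, commitment_signed,
+			// revoke_and_ack) got before the disconnect below.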
+ } else { + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 3 { + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 4 { + let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap(); + assert!(as_commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 5 { + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + } + } + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + if messages_delivered < 2 { + // Even if the funding_locked messages get exchanged, as long as nothing further was + // received on either side, both sides will need to resend them. + reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 2 { + // nodes[0] still wants its RAA + commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false)); + } else if messages_delivered == 3 { + // nodes[0] still wants its commitment_signed + reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } else if messages_delivered == 4 { + // nodes[1] still wants its final RAA + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true)); + } else if messages_delivered == 5 { + // Everything was delivered... + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_1, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + nodes[1].node.claim_funds(payment_preimage_1); + check_added_monitors!(nodes[1], 1); + + let events_3 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + let (update_fulfill_htlc, commitment_signed) = match events_3[0] { + Event::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), + }; + + if messages_delivered >= 1 { + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap(); + + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(payment_preimage_1, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + + if messages_delivered >= 2 { + let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + + if messages_delivered >= 3 { + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 4 { + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); + assert!(bs_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + if messages_delivered >= 5 { + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + } + } + } + } + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + if messages_delivered < 2 { + reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); + //TODO: Deduplicate PaymentSent events, then enable this if: + //if messages_delivered < 1 { + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(payment_preimage_1, *payment_preimage); + }, + _ => panic!("Unexpected event"), + } + //} + } else if messages_delivered == 2 { + // nodes[0] still wants its RAA + commitment_signed + reconnect_nodes(&nodes[0], 
&nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+		} else if messages_delivered == 3 {
+			// nodes[0] still wants its commitment_signed
+			reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+		} else if messages_delivered == 4 {
+			// nodes[1] still wants its final RAA
+			reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+		} else if messages_delivered == 5 {
+			// Everything was delivered...
+			reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		}
+
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+		// Channel should still work fine...
+		let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+	}
+
+	#[test]
+	fn test_drop_messages_peer_disconnect_a() {
+		do_test_drop_messages_peer_disconnect(0);
+		do_test_drop_messages_peer_disconnect(1);
+		do_test_drop_messages_peer_disconnect(2);
+	}
+
+	#[test]
+	fn test_drop_messages_peer_disconnect_b() {
+		do_test_drop_messages_peer_disconnect(3);
+		do_test_drop_messages_peer_disconnect(4);
+		do_test_drop_messages_peer_disconnect(5);
+	}
+
+	#[test]
+	fn test_funding_peer_disconnect() {
+		// Test that we can lock in our funding tx while disconnected
+		let nodes = create_network(2);
+		let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
+
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
+		let events_1 = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events_1.len(), 1);
+		match events_1[0] {
+			Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
+				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+				assert!(announcement_sigs.is_none());
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
+		let events_2 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_2.len(), 1);
+		match events_2[0] {
+			Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				assert!(announcement_sigs.is_none());
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+		// TODO: We shouldn't need to manually pass list_usable_channels here once we support
+		// rebroadcasting announcement_signatures upon reconnect.
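+		// (Without the announcement_signatures exchange the channel is never publicly
+		// announced, so the router can only find it via the first_hops we pass in.)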
+ + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + } + + #[test] + fn test_drop_messages_peer_disconnect_dual_htlc() { + // Test that we can handle reconnecting when both sides of a channel have pending + // commitment_updates when we disconnect. + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); + + // Now try to send a second payment which will fail to send + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + + nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::UpdateHTLCs { .. } => {}, + _ => panic!("Unexpected event"), + } + + assert!(nodes[1].node.claim_funds(payment_preimage_1)); + check_added_monitors!(nodes[1], 1); + + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + let (_, commitment_update) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); + assert!(commitment_update.is_none()); + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + assert_eq!(reestablish_2.len(), 1); + + let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + assert!(bs_resp.1.is_none()); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst); + + assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1); + 
assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap(); + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + assert!(bs_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + let bs_second_commitment_signed = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap().unwrap(); + assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_signed.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + + let as_commitment_signed = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); + assert!(as_commitment_signed.update_add_htlcs.is_empty()); + assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_signed.update_fail_htlcs.is_empty()); + assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_signed.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + + let (as_revoke_and_ack, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap(); + assert!(as_second_commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap(); + assert!(bs_third_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + let events_4 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + match events_4[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PaymentReceived { ref payment_hash, amt: _ } => { + assert_eq!(payment_hash_2, *payment_hash); + }, + _ => panic!("Unexpected event"), + } + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_simple_monitor_permanent_update_fail() { + // Test that we handle a simple permanent monitor update failure + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + }; + + // TODO: Once we hit the chain with the failure transaction we should check that we get a + // PaymentFailed event + + assert_eq!(nodes[0].node.list_channels().len(), 0); + } + + fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { + // Test that we can recover from a simple temporary monitor update failure optionally with + // a disconnect in between + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); } + check_added_monitors!(nodes[0], 1); + + let events_1 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_1.is_empty()); + assert_eq!(nodes[0].node.list_channels().len(), 1); + + if disconnect { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let mut events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + let payment_event = SendEvent::from_event(events_2.pop().unwrap()); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + 
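// The HTLC is now fully committed on both sides; poke nodes[1]'s pending-forwards
+		// processing so it surfaces the PaymentReceived event checked below.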
+		expect_pending_htlcs_forwardable!(nodes[1]);
+
+		let events_3 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_3.len(), 1);
+		match events_3[0] {
+			Event::PaymentReceived { ref payment_hash, amt } => {
+				assert_eq!(payment_hash_1, *payment_hash);
+				assert_eq!(amt, 1000000);
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+
+		// Now set it to failed again...
+		let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
+		check_added_monitors!(nodes[0], 1);
+
+		let events_4 = nodes[0].node.get_and_clear_pending_events();
+		assert!(events_4.is_empty());
+		assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+		if disconnect {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+			reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		}
+
+		// ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+
+		let events_5 = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events_5.len(), 1);
+		match events_5[0] {
+			Event::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+
+		// TODO: Once we hit the chain with the failure transaction we should check that we get a
+		// PaymentFailed event
+
+		assert_eq!(nodes[0].node.list_channels().len(), 0);
+	}
+
+	#[test]
+	fn test_simple_monitor_temporary_update_fail() {
+		do_test_simple_monitor_temporary_update_fail(false);
+		do_test_simple_monitor_temporary_update_fail(true);
+	}
+
+	fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
+		let disconnect_flags = 8 | 16;
+
+		// Test that we can recover from a temporary monitor update failure with some in-flight
+		// HTLCs going on at the same time, potentially with some disconnection thrown in.
+		// * First we route a payment, then get a temporary monitor update failure when trying to
+		//   route a second payment. We then claim the first payment.
+		// * If disconnect_count is set, we will disconnect at this point (which is likely, as
+		//   TemporaryFailure typically indicates a net disconnect which resulted in failing to
+		//   update the ChannelMonitor on a watchtower).
+		// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
+		//   immediately, otherwise we wait until after the reconnect and deliver them via the
+		//   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
+		//   disconnect_count & !disconnect_flags is 0).
+		// * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
+		//   through message sending, potentially disconnect/reconnecting multiple times based on
+		//   disconnect_count, to get the update_fulfill_htlc through.
+		// * We then walk through more message exchanges to get the original update_add_htlc
+		//   through, swapping message ordering based on disconnect_count & 8 and optionally
+		//   disconnect/reconnecting based on disconnect_count.
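+		// To decode disconnect_count below: the bits outside disconnect_flags give how many
+		// disconnect/reconnect rounds to run, bit 8 swaps the order in which the two pending
+		// revoke_and_ack messages are handled, and bit 16 defers delivering the initial
+		// update_fulfill_htlc/commitment_signed until channel_reestablish.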
+		let mut nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+		// Now try to send a second payment which will fail to send
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
+		check_added_monitors!(nodes[0], 1);
+
+		let events_1 = nodes[0].node.get_and_clear_pending_events();
+		assert!(events_1.is_empty());
+		assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+		// Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
+		// but nodes[0] won't respond since it is frozen.
+		assert!(nodes[1].node.claim_funds(payment_preimage_1));
+		check_added_monitors!(nodes[1], 1);
+		let events_2 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_2.len(), 1);
+		let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
+			Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				assert!(update_add_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert!(update_fee.is_none());
+
+				if (disconnect_count & 16) == 0 {
+					nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+					let events_3 = nodes[0].node.get_and_clear_pending_events();
+					assert_eq!(events_3.len(), 1);
+					match events_3[0] {
+						Event::PaymentSent { ref payment_preimage } => {
+							assert_eq!(*payment_preimage, payment_preimage_1);
+						},
+						_ => panic!("Unexpected event"),
+					}
+
+					if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+						assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+					} else { panic!(); }
+				}
+
+				(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
+			},
+			_ => panic!("Unexpected event"),
+		};
+
+		if disconnect_count & !disconnect_flags > 0 {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		}
+
+		// Now fix monitor updating...
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+
+		macro_rules! disconnect_reconnect_peers { () => { {
disconnect_reconnect_peers { () => { { + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + assert_eq!(reestablish_2.len(), 1); + + let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + (reestablish_1, reestablish_2, as_resp, bs_resp) + } } } + + let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 { + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert!(events_4.is_empty()); + + let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + assert_eq!(reestablish_2.len(), 1); + + let mut as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap(); + check_added_monitors!(nodes[0], 0); + let mut bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap(); + check_added_monitors!(nodes[1], 0); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + assert!(bs_resp.1.is_none()); + if (disconnect_count & 16) == 0 { + assert!(bs_resp.2.is_none()); + + assert!(as_resp.1.is_some()); + assert!(as_resp.2.is_some()); + assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst); + } else { + assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); + assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); + assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]); + assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); + + assert!(as_resp.1.is_none()); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap(); + let events_3 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, payment_preimage_1); + }, + _ => panic!("Unexpected event"), + } + + let (as_resp_raa, as_resp_cu) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap(); + assert!(as_resp_cu.is_none()); + check_added_monitors!(nodes[0], 1); + + as_resp.1 = Some(as_resp_raa); + bs_resp.2 = None; + } + + if disconnect_count & !disconnect_flags > 1 { + let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!(); + + if (disconnect_count & 16) == 0 { + assert!(reestablish_1 == second_reestablish_1); + assert!(reestablish_2 == second_reestablish_2); + } + assert!(as_resp == second_as_resp); + assert!(bs_resp == second_bs_resp); + } + + (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), 
as_resp.2.unwrap()), as_resp.1.unwrap()) + } else { + let mut events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 2); + (SendEvent::from_event(events_4.remove(0)), match events_4[0] { + Event::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + msg.clone() + }, + _ => panic!("Unexpected event"), + }) + }; + + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting an RAA from nodes[0] still + check_added_monitors!(nodes[1], 1); + + if disconnect_count & !disconnect_flags > 2 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.is_none()); + } + + let as_commitment_update; + let bs_second_commitment_update; + + macro_rules! handle_bs_raa { () => { + as_commitment_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); + assert!(as_commitment_update.update_add_htlcs.is_empty()); + assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + } } + + macro_rules! 
handle_initial_raa { () => { + bs_second_commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap().unwrap(); + assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + } } + + if (disconnect_count & 8) == 0 { + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } else { + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + + assert!(bs_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } + + let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap(); + assert!(as_commitment_signed.is_none()); + check_added_monitors!(nodes[0], 1); + + let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap(); + assert!(bs_third_commitment_signed.is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[1], 1); + + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_2, *payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_monitor_temporary_update_fail_a() { + do_test_monitor_temporary_update_fail(0); + do_test_monitor_temporary_update_fail(1); + do_test_monitor_temporary_update_fail(2); + 
+		do_test_monitor_temporary_update_fail(3);
+		do_test_monitor_temporary_update_fail(4);
+		do_test_monitor_temporary_update_fail(5);
+	}
+
+	#[test]
+	fn test_monitor_temporary_update_fail_b() {
+		do_test_monitor_temporary_update_fail(2 | 8);
+		do_test_monitor_temporary_update_fail(3 | 8);
+		do_test_monitor_temporary_update_fail(4 | 8);
+		do_test_monitor_temporary_update_fail(5 | 8);
+	}
+
+	#[test]
+	fn test_monitor_temporary_update_fail_c() {
+		do_test_monitor_temporary_update_fail(1 | 16);
+		do_test_monitor_temporary_update_fail(2 | 16);
+		do_test_monitor_temporary_update_fail(3 | 16);
+		do_test_monitor_temporary_update_fail(2 | 8 | 16);
+		do_test_monitor_temporary_update_fail(3 | 8 | 16);
+	}
+
 	#[test]
 	fn test_invalid_channel_announcement() {
 		//Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customized channel_announcement msgs
@@ -3400,7 +6034,7 @@ mod tests {
 		let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
 		let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
 
-		let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+		let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
 
 		let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
 		let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
@@ -3447,7 +6081,7 @@ mod tests {
 		let unsigned_msg = dummy_unsigned_msg!();
 		sign_msg!(unsigned_msg);
 		assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
-		let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+		let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
 
 		// Configured with Network::Testnet
 		let mut unsigned_msg = dummy_unsigned_msg!();