X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=ccfb9f776973a217b812fbdf59eb8058bee0c96a;hb=832fc4fd4435fa236f15d3e737bebf64619ff60e;hp=6933198c608e0e56f59ffe510d2b2b6583fb3dde;hpb=6969fc997bd18d21c5a25fae34bc1eb123bf512b;p=rust-lightning diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index 6933198c..ccfb9f77 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -22,7 +22,7 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY}; use ln::router::{Route,RouteHop}; use ln::msgs; use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError}; @@ -63,6 +63,7 @@ use std::time::{Instant,Duration}; mod channel_held_info { use ln::msgs; use ln::router::Route; + use ln::channelmanager::PaymentHash; use secp256k1::key::SecretKey; /// Stores the info we will need to send when we want to forward an HTLC onwards @@ -70,7 +71,7 @@ mod channel_held_info { pub struct PendingForwardHTLCInfo { pub(super) onion_packet: Option, pub(super) incoming_shared_secret: [u8; 32], - pub(super) payment_hash: [u8; 32], + pub(super) payment_hash: PaymentHash, pub(super) short_channel_id: u64, pub(super) amt_to_forward: u64, pub(super) outgoing_cltv_value: u32, @@ -90,7 +91,7 @@ mod channel_held_info { } /// Tracks the inbound corresponding to an outbound HTLC - #[derive(Clone)] + #[derive(Clone, PartialEq)] pub struct HTLCPreviousHopData { pub(super) short_channel_id: u64, pub(super) htlc_id: u64, @@ -98,7 +99,7 @@ mod channel_held_info { } /// Tracks the inbound corresponding to an outbound HTLC - #[derive(Clone)] + #[derive(Clone, PartialEq)] pub enum HTLCSource { PreviousHopData(HTLCPreviousHopData), OutboundRoute { @@ -133,9 +134,24 @@ mod channel_held_info { } pub(super) use self::channel_held_info::*; +/// payment_hash type, use to cross-lock hop +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct PaymentHash(pub [u8;32]); +/// payment_preimage type, use to route payment between hop +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct PaymentPreimage(pub [u8;32]); + +type ShutdownResult = (Vec, Vec<(HTLCSource, PaymentHash)>); + +/// Error type returned across the channel_state mutex boundary. When an Err is generated for a +/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel +/// immediately (ie with no further calls on it made). Thus, this step happens inside a +/// channel_state lock. We then return the set of things that need to be done outside the lock in +/// this struct and call handle_error!() on it. 
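// Concretely, the ChannelMessageHandler implementation further down wraps each internal_*
// handler in handle_error!() so that any required force-closure runs after the channel_state
// lock has been released. A rough sketch of one such wrapper (the exact signature and the
// total_consistency_lock guard are assumed from the rest of this file, not shown in this hunk):
//
//     fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
//         let _ = self.total_consistency_lock.read().unwrap();
//         handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
//     }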
+ struct MsgHandleErrInternal { err: msgs::HandleError, - needs_channel_force_close: bool, + shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { #[inline] @@ -150,11 +166,15 @@ impl MsgHandleErrInternal { }, }), }, - needs_channel_force_close: false, + shutdown_finish: None, } } #[inline] - fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self { + fn from_no_close(err: msgs::HandleError) -> Self { + Self { err, shutdown_finish: None } + } + #[inline] + fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { Self { err: HandleError { err, @@ -165,18 +185,10 @@ impl MsgHandleErrInternal { }, }), }, - needs_channel_force_close: true, + shutdown_finish: Some((shutdown_res, channel_update)), } } #[inline] - fn from_maybe_close(err: msgs::HandleError) -> Self { - Self { err, needs_channel_force_close: true } - } - #[inline] - fn from_no_close(err: msgs::HandleError) -> Self { - Self { err, needs_channel_force_close: false } - } - #[inline] fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { @@ -194,28 +206,7 @@ impl MsgHandleErrInternal { }), }, }, - needs_channel_force_close: false, - } - } - #[inline] - fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self { - Self { - err: match err { - ChannelError::Ignore(msg) => HandleError { - err: msg, - action: Some(msgs::ErrorAction::IgnoreError), - }, - ChannelError::Close(msg) => HandleError { - err: msg, - action: Some(msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { - channel_id, - data: msg.to_string() - }, - }), - }, - }, - needs_channel_force_close: true, + shutdown_finish: None, } } } @@ -266,7 +257,7 @@ struct ChannelHolder { /// Note that while this is held in the same mutex as the channels themselves, no consistency /// guarantees are made about the channels given here actually existing anymore by the time you /// go to read them! - claimable_htlcs: HashMap<[u8; 32], Vec>, + claimable_htlcs: HashMap>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). pending_msg_events: Vec, @@ -276,7 +267,7 @@ struct MutChannelHolder<'a> { short_to_id: &'a mut HashMap, next_forward: &'a mut Instant, forward_htlcs: &'a mut HashMap>, - claimable_htlcs: &'a mut HashMap<[u8; 32], Vec>, + claimable_htlcs: &'a mut HashMap>, pending_msg_events: &'a mut Vec, } impl ChannelHolder { @@ -350,16 +341,17 @@ pub struct ChannelManager { /// ie the node we forwarded the payment on to should always have enough room to reliably time out /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the /// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more). -const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO? +const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO? const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO? -// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that -// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have -// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the -// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC. 
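// The CHECK_*_SANITY constants below are compile-time assertions: under #[deny(const_err)], a
// u32 subtraction that would underflow is a hard error, so the crate simply fails to build if
// the inequality it encodes ever stops holding. A minimal sketch of the pattern, using
// hypothetical constants A and B:
//
//     const A: u32 = 100;
//     const B: u32 = 42;
//     #[deny(const_err)]
//     #[allow(dead_code)]
//     const A_AT_LEAST_B: u32 = A - B; // underflows, and thus fails to compile, if A < B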
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS + +// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within +// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it +// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel +// on-chain to time out the HTLC. #[deny(const_err)] #[allow(dead_code)] -const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER; +const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY; // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See // ChannelMontior::would_broadcast_at_height for a description of why this is needed. @@ -408,26 +400,14 @@ macro_rules! handle_error { ($self: ident, $internal: expr, $their_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, needs_channel_force_close }) => { - if needs_channel_force_close { - match &err.action { - &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {}, - &Some(msgs::ErrorAction::IgnoreError) => {}, - &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &None => {}, + Err(MsgHandleErrInternal { err, shutdown_finish }) => { + if let Some((shutdown_res, update_option)) = shutdown_finish { + $self.finish_force_close_channel(shutdown_res); + if let Some(update) = update_option { + let mut channel_state = $self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); } } Err(err) @@ -436,6 +416,99 @@ macro_rules! handle_error { } } +macro_rules! break_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg); + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! 
try_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg); + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! return_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new()) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => { + if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); } + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + // TODO: $failed_fails is dropped here, which will cause other channels to hit the + // chain in a confused state! We need to move them into the ChannelMonitor which + // will be responsible for failing backwards once things confirm on-chain. + // It's ok that we drop $failed_forwards here - at this point we'd rather they + // broadcast HTLC-Timeout and pay the associated fees to get their funds back than + // us bother trying to claim it just to forward on to another peer. If we're + // splitting hairs we'd prefer to claim payments that were to us, but we haven't + // given up the preimage yet, so might as well just wait until the payment is + // retried, avoiding the on-chain fees. + return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs); + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())); + }, + } + } +} + +// Does not break in case of TemporaryFailure! +macro_rules! 
maybe_break_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false); + }, + } + } +} + impl ChannelManager { /// Constructs a new ChannelManager to hold several channels and route between them. /// @@ -609,8 +682,9 @@ impl ChannelManager { } #[inline] - fn finish_force_close_channel(&self, shutdown_res: (Vec, Vec<(HTLCSource, [u8; 32])>)) { + fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (local_txn, mut failed_htlcs) = shutdown_res; + log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { // unknown_next_peer...I dunno who that is anymore.... self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }); @@ -618,13 +692,6 @@ impl ChannelManager { for tx in local_txn { self.tx_broadcaster.broadcast_transaction(&tx); } - //TODO: We need to have a way where outbound HTLC claims can result in us claiming the - //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards). - //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and - //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after - //timeouts are hit and our claims confirm). 
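// The 0x4000 | 10 used just above is BOLT 4's PERM | unknown_next_peer onion failure code: the
// flag bits are ORed with the specific failure number. Spelled out (constant names are ours,
// values per BOLT 4):
//
//     const PERM: u16 = 0x4000;           // failure is permanent for this payment
//     const UNKNOWN_NEXT_PEER: u16 = 10;  // the next channel along the route is gone
//     let failure_code = PERM | UNKNOWN_NEXT_PEER; // == 0x400a, what these HTLCs are failed back with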
- //TODO: In any case, we need to make sure we remove any pending htlc tracking (via - //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel } /// Force closes a channel, immediately broadcasting the latest local commitment transaction to @@ -644,6 +711,7 @@ impl ChannelManager { return; } }; + log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown()); if let Ok(update) = self.get_channel_update(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); @@ -661,33 +729,6 @@ impl ChannelManager { } } - fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) { - match err { - ChannelMonitorUpdateErr::PermanentFailure => { - let mut chan = { - let channel_state = channel_state_lock.borrow_parts(); - let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); - if let Some(short_id) = chan.get_short_channel_id() { - channel_state.short_to_id.remove(&short_id); - } - chan - }; - mem::drop(channel_state_lock); - self.finish_force_close_channel(chan.force_shutdown()); - if let Ok(update) = self.get_channel_update(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - }, - ChannelMonitorUpdateErr::TemporaryFailure => { - let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!"); - channel.monitor_update_failed(reason); - }, - } - } - #[inline] fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) { assert_eq!(shared_secret.len(), 32); @@ -831,7 +872,7 @@ impl ChannelManager { } const ZERO:[u8; 21*65] = [0; 21*65]; - fn construct_onion_packet(mut payloads: Vec, onion_keys: Vec, associated_data: &[u8; 32]) -> msgs::OnionPacket { + fn construct_onion_packet(mut payloads: Vec, onion_keys: Vec, associated_data: &PaymentHash) -> msgs::OnionPacket { let mut buf = Vec::with_capacity(21*65); buf.resize(21*65, 0); @@ -868,7 +909,7 @@ impl ChannelManager { let mut hmac = Hmac::new(Sha256::new(), &keys.mu); hmac.input(&packet_data); - hmac.input(&associated_data[..]); + hmac.input(&associated_data.0[..]); hmac.raw_result(&mut hmac_res); } @@ -990,7 +1031,7 @@ impl ChannelManager { let mut hmac = Hmac::new(Sha256::new(), &mu); hmac.input(&msg.onion_routing_packet.hop_data); - hmac.input(&msg.payment_hash); + hmac.input(&msg.payment_hash.0[..]); if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) { return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!()); } @@ -1187,8 +1228,18 @@ impl ChannelManager { /// May generate a SendHTLCs message event on success, which should be relayed. /// /// Raises APIError::RoutError when invalid route or forward parameter - /// (cltv_delta, fee, node public key) is specified - pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> { + /// (cltv_delta, fee, node public key) is specified. + /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates + /// (including due to previous monitor update failure or new permanent monitor update failure). 
+ /// Raised APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the + /// relevant updates. + /// + /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed + /// and you may wish to retry via a different route immediately. + /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably + /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry + /// the payment via a different route unless you intend to pay twice! + pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> { if route.hops.len() < 1 || route.hops.len() > 20 { return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"}); } @@ -1199,11 +1250,7 @@ impl ChannelManager { } } - let session_priv = SecretKey::from_slice(&self.secp_ctx, &{ - let mut session_key = [0; 32]; - rng::fill_bytes(&mut session_key); - session_key - }).expect("RNG is bad!"); + let session_priv = self.keys_manager.get_session_key(); let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; @@ -1213,63 +1260,73 @@ impl ChannelManager { let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash); let _ = self.total_consistency_lock.read().unwrap(); - let mut channel_state = self.channel_state.lock().unwrap(); - let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { - None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), - Some(id) => id.clone(), - }; + let err: Result<(), _> = loop { + let mut channel_lock = self.channel_state.lock().unwrap(); + + let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { + None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), + Some(id) => id.clone(), + }; + + let channel_state = channel_lock.borrow_parts(); + if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { + match { + if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey { + return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); + } + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}); + } + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + route: route.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + }, onion_packet), channel_state, chan) + } { + Some((update_add, commitment_signed, chan_monitor)) => { + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst); + // Note that MonitorUpdateFailed here indicates (per function docs) + // that we will resent the commitment update once we unfree monitor + // updating, so we have to take special care that we don't return + // something else in case we will resend later! 
+ return Err(APIError::MonitorUpdateFailed); + } - let res = { - let chan = channel_state.by_id.get_mut(&id).unwrap(); - if chan.get_their_node_id() != route.hops.first().unwrap().pubkey { - return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); - } - if chan.is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.is_live() { - return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"}); - } - chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - route: route.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - }, onion_packet).map_err(|he| - match he { - ChannelError::Close(err) => { - // TODO: We need to close the channel here, but for that to be safe we have - // to do all channel closure inside the channel_state lock which is a - // somewhat-larger refactor, so we leave that for later. - APIError::ChannelUnavailable { err } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: route.hops.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); }, - ChannelError::Ignore(err) => APIError::ChannelUnavailable { err }, + None => {}, } - )? + } else { unreachable!(); } + return Ok(()); }; - match res { - Some((update_add, commitment_signed, chan_monitor)) => { - if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst); - return Err(APIError::MonitorUpdateFailed); - } - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: route.hops.first().unwrap().pubkey, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); + match handle_error!(self, err, route.hops.first().unwrap().pubkey) { + Ok(_) => unreachable!(), + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + log_error!(self, "Got bad keys: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: route.hops.first().unwrap().pubkey, + action: e.action, + }); + } + Err(APIError::ChannelUnavailable { err: e.err }) }, - None => {}, } - - Ok(()) } /// Call this upon creation of a funding transaction for the given channel. @@ -1290,7 +1347,9 @@ impl ChannelManager { match channel_state.by_id.remove(temporary_channel_id) { Some(mut chan) => { (chan.get_outbound_funding_created(funding_txo) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, chan.channel_id())) + .map_err(|e| if let ChannelError::Close(msg) = e { + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None) + } else { unreachable!(); }) , chan) }, None => return @@ -1428,7 +1487,7 @@ impl ChannelManager { }, }; if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!();// but def dont push the event... 
+ unimplemented!(); } channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: forward_chan.get_their_node_id(), @@ -1475,7 +1534,7 @@ impl ChannelManager { } /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event. - pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool { + pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, reason: PaymentFailReason) -> bool { let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = Some(self.channel_state.lock().unwrap()); @@ -1495,9 +1554,10 @@ impl ChannelManager { /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { match source { HTLCSource::OutboundRoute { .. } => { + log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); mem::drop(channel_state_lock); if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error { let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone()); @@ -1513,16 +1573,22 @@ impl ChannelManager { rejected_by_dest: !payment_retryable, }); } else { - panic!("should have onion error packet here"); + //TODO: Pass this back (see GH #243) + self.pending_events.lock().unwrap().push(events::Event::PaymentFailed { + payment_hash: payment_hash.clone(), + rejected_by_dest: false, // We failed it ourselves, can't blame them + }); } }, HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => { let err_packet = match onion_error { HTLCFailReason::Reason { failure_code, data } => { + log_trace!(self, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode(); ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet) }, HTLCFailReason::ErrorPacket { err } => { + log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built ErrorPacket", log_bytes!(payment_hash.0)); ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data) } }; @@ -1567,11 +1633,11 @@ impl ChannelManager { /// should probably kick the net layer to go send messages if this returns true! /// /// May panic if called except in response to a PaymentReceived event. 
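// Putting the pieces together, a payer/payee round trip with the new types looks roughly like
// the sketch below. Illustrative only: `route`, the `payer` and `payee` ChannelManagers, and a
// real source of randomness are all assumed to exist elsewhere.
//
//     // Payee: pick a random preimage and hand its SHA256 (the payment hash) to the payer.
//     let payment_preimage = PaymentPreimage([0x42; 32]); // must be securely random in practice
//     let mut sha = Sha256::new();
//     sha.input(&payment_preimage.0[..]);
//     let mut payment_hash = PaymentHash([0; 32]);
//     sha.result(&mut payment_hash.0[..]);
//
//     // Payer: the error variants split into "nothing sent, retry elsewhere" and "already committed".
//     match payer.send_payment(route, payment_hash) {
//         Ok(()) => {},                                     // HTLC sent; watch for payment events
//         Err(APIError::RouteError { .. })
//         | Err(APIError::ChannelUnavailable { .. }) => {}, // nothing sent; may retry another route
//         Err(APIError::MonitorUpdateFailed) => {},         // committed locally; do NOT re-send elsewhere
//         Err(_) => {},
//     }
//
//     // Payee, after seeing Event::PaymentReceived { payment_hash, .. }:
//     assert!(payee.claim_funds(payment_preimage));
//     // ...or, if the hash is unknown or the amount is wrong, fail it back instead:
//     // payee.fail_htlc_backwards(&payment_hash, reason); // reason: a PaymentFailReason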
- pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool { + pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool { let mut sha = Sha256::new(); - sha.input(&payment_preimage); - let mut payment_hash = [0; 32]; - sha.result(&mut payment_hash); + sha.input(&payment_preimage.0[..]); + let mut payment_hash = PaymentHash([0; 32]); + sha.result(&mut payment_hash.0[..]); let _ = self.total_consistency_lock.read().unwrap(); @@ -1585,7 +1651,7 @@ impl ChannelManager { true } else { false } } - fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: [u8; 32]) { + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage) { match source { HTLCSource::OutboundRoute { .. } => { mem::drop(channel_state_lock); @@ -1666,6 +1732,13 @@ impl ChannelManager { if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { + // TODO: There may be some pending HTLCs that we intended to fail + // backwards when a monitor update failed. We should make sure + // knowledge of those gets moved into the appropriate in-memory + // ChannelMonitor and they get failed backwards once we get + // on-chain confirmations. + // Note I think #198 addresses this, so once its merged a test + // should be written. if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } @@ -1752,19 +1825,19 @@ impl ChannelManager { fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { let (value, output_script, user_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.temporary_channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - chan.accept_channel(&msg, &self.default_configuration) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?; - (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id()) + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan); + (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, //TODO: same as above - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -1778,22 +1851,16 @@ impl ChannelManager { } fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let (chan, funding_msg, monitor_update) = { - let mut channel_state = 
self.channel_state.lock().unwrap(); + let ((funding_msg, monitor_update), chan) = { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - match chan.get_mut().funding_created(msg) { - Ok((funding_msg, monitor_update)) => { - (chan.remove(), funding_msg, monitor_update) - }, - Err(e) => { - return Err(e).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id)) - } - } + (try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } @@ -1822,20 +1889,21 @@ impl ChannelManager { fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let (funding_txo, user_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan); if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { unimplemented!(); } - (chan.get_funding_txo().unwrap(), chan.get_user_id()) + (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id()) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -1849,15 +1917,14 @@ impl ChannelManager { fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.funding_locked(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, 
msg.channel_id))?; - if let Some(announcement_sigs) = self.get_announcement_sigs(chan) { + try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan); + if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) { channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: their_node_id.clone(), msg: announcement_sigs, @@ -1865,7 +1932,7 @@ impl ChannelManager { } Ok(()) }, - None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } @@ -1880,7 +1947,7 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry); if let Some(msg) = shutdown { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: their_node_id.clone(), @@ -1928,7 +1995,7 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (closing_signed, tx) = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry); if let Some(msg) = closing_signed { channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { node_id: their_node_id.clone(), @@ -1977,18 +2044,18 @@ impl ChannelManager { let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if !chan.is_usable() { + if !chan.get().is_usable() { // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. 
}) = pending_forward_info { - let chan_update = self.get_channel_update(chan); + let chan_update = self.get_channel_update(chan.get()); pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, @@ -2005,26 +2072,29 @@ impl ChannelManager { })); } } - chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } + Ok(()) } fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - chan.update_fulfill_htlc(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone() - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + let mut channel_lock = self.channel_state.lock().unwrap(); + let htlc_source = { + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { + //TODO: here and below MsgHandleErrInternal, #153 case + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } }; - self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone()); + self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone()); Ok(()) } @@ -2226,53 +2296,54 @@ impl ChannelManager { } fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, 
HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) - }?; + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } Ok(()) } fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if (msg.failure_code & 0x8000) == 0 { - return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id)); + try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan); } - chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); Ok(()) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = + try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some()); + 
//TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: their_node_id.clone(), @@ -2299,7 +2370,7 @@ impl ChannelManager { } Ok(()) }, - None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } @@ -2340,16 +2411,16 @@ impl ChannelManager { let (pending_forwards, mut pending_failures, short_channel_id) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = + try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures); } if let Some(updates) = commitment_update { channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { @@ -2363,9 +2434,9 @@ impl ChannelManager { msg, }); } - (pending_forwards, pending_failures, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel")) + (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; for failure in pending_failures.drain(..) 
{ @@ -2377,41 +2448,44 @@ impl ChannelManager { } fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } + Ok(()) } fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if !chan.is_usable() { + if !chan.get().is_usable() { return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); } let our_node_id = self.get_our_node_id(); - let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let (announcement, our_bitcoin_sig) = + try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan); let were_node_one = announcement.node_id_1 == our_node_id; let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); - let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id); - secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action); - secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action); + if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() || + self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() { + 
try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan); + } let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key); @@ -2423,10 +2497,10 @@ impl ChannelManager { bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig }, contents: announcement, }, - update_msg: self.get_channel_update(chan).unwrap(), // can only fail if we're not in a ready state + update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state }); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } Ok(()) } @@ -2435,16 +2509,26 @@ impl ChannelManager { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) = + try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan); if let Some(monitor) = channel_monitor { - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); + if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + // channel_reestablish doesn't guarantee the order it returns is sensical + // for the messages it returns, but if we're setting what messages to + // re-transmit on monitor update success, we need to make sure it is sane. 
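// The same split drives return_monitor_err! here as everywhere else: a TemporaryFailure only
// pauses the channel via monitor_update_failed(), with the queued messages replayed once
// monitor updating is restored, while a PermanentFailure force-closes the channel on the spot.
// A ManyChannelMonitor implementation reports the two cases roughly like this sketch, where
// remote_store and is_transient() are hypothetical names for the user's storage layer:
//
//     match remote_store.persist(&monitor) {
//         Ok(()) => Ok(()),
//         Err(ref e) if e.is_transient() => Err(ChannelMonitorUpdateErr::TemporaryFailure),
//         Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
//     }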
+ if revoke_and_ack.is_none() { + order = RAACommitmentOrder::CommitmentFirst; + } + if commitment_update.is_none() { + order = RAACommitmentOrder::RevokeAndACKFirst; + } + return_monitor_err!(self, e, channel_state, chan, order); + //TODO: Resend the funding_locked if needed once we get the monitor running again } } if let Some(msg) = funding_locked { @@ -2487,7 +2571,7 @@ impl ChannelManager { } Ok(()) }, - None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } @@ -2498,54 +2582,83 @@ impl ChannelManager { #[doc(hidden)] pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { let _ = self.total_consistency_lock.read().unwrap(); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = channel_state_lock.borrow_parts(); + let their_node_id; + let err: Result<(), _> = loop { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&channel_id) { - None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), - Some(chan) => { - if !chan.is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); - } - if chan.is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); - } - if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw) - .map_err(|e| match e { - ChannelError::Ignore(err) => APIError::APIMisuseError{err}, - ChannelError::Close(err) => { - // TODO: We need to close the channel here, but for that to be safe we have - // to do all channel closure inside the channel_state lock which is a - // somewhat-larger refactor, so we leave that for later. - APIError::APIMisuseError{err} + match channel_state.by_id.entry(channel_id) { + hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), + hash_map::Entry::Occupied(mut chan) => { + if !chan.get().is_outbound() { + return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); + } + if chan.get().is_awaiting_monitor_update() { + return Err(APIError::MonitorUpdateFailed); + } + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); + } + their_node_id = chan.get().get_their_node_id(); + if let Some((update_fee, commitment_signed, chan_monitor)) = + break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan) + { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, }, - })? 
{ - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + }); } - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_their_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, + }, + } + return Ok(()) + }; + + match handle_error!(self, err, their_node_id) { + Ok(_) => unreachable!(), + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + log_error!(self, "Got bad keys: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: their_node_id, + action: e.action, }); } + Err(APIError::APIMisuseError { err: e.err }) }, } - Ok(()) } } impl events::MessageSendEventsProvider for ChannelManager { fn get_and_clear_pending_msg_events(&self) -> Vec { + // TODO: Event release to users and serialization is currently race-y: its very easy for a + // user to serialize a ChannelManager with pending events in it and lose those events on + // restart. This is doubly true for the fail/fulfill-backs from monitor events! + { + //TODO: This behavior should be documented. + for htlc_update in self.monitor.fetch_pending_htlc_updated() { + if let Some(preimage) = htlc_update.payment_preimage { + log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + } else { + log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }); + } + } + } + let mut ret = Vec::new(); let mut channel_state = self.channel_state.lock().unwrap(); mem::swap(&mut ret, &mut channel_state.pending_msg_events); @@ -2555,6 +2668,22 @@ impl events::MessageSendEventsProvider for ChannelManager { impl events::EventsProvider for ChannelManager { fn get_and_clear_pending_events(&self) -> Vec { + // TODO: Event release to users and serialization is currently race-y: its very easy for a + // user to serialize a ChannelManager with pending events in it and lose those events on + // restart. This is doubly true for the fail/fulfill-backs from monitor events! + { + //TODO: This behavior should be documented. 
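// Draining the monitor here means that HTLCs a ChannelMonitor resolved on-chain get claimed or
// failed backwards the next time either provider is polled. A user typically drives this from
// an event loop roughly like the sketch below (PaymentReceived's exact fields are assumed; this
// patch only names the event, while PaymentFailed's fields are shown above):
//
//     for event in channel_manager.get_and_clear_pending_events() {
//         match event {
//             Event::PaymentReceived { payment_hash, .. } => { /* look up the invoice, then claim_funds() */ },
//             Event::PaymentFailed { payment_hash, rejected_by_dest } => { /* maybe retry over another route */ },
//             _ => { /* funding, broadcast and other events */ },
//         }
//     }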
+ for htlc_update in self.monitor.fetch_pending_htlc_updated() { + if let Some(preimage) = htlc_update.payment_preimage { + log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + } else { + log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }); + } + } + } + let mut ret = Vec::new(); let mut pending_events = self.pending_events.lock().unwrap(); mem::swap(&mut ret, &mut *pending_events); @@ -2564,6 +2693,8 @@ impl events::EventsProvider for ChannelManager { impl ChainListener for ChannelManager { fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) { + let header_hash = header.bitcoin_hash(); + log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len()); let _ = self.total_consistency_lock.read().unwrap(); let mut failed_channels = Vec::new(); { @@ -2588,16 +2719,15 @@ impl ChainListener for ChannelManager { } else if let Err(e) = chan_res { pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_their_node_id(), - action: e.action, + action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }), }); - if channel.is_shutdown() { - return false; - } + return false; } if let Some(funding_txo) = channel.get_funding_txo() { for tx in txn_matched { for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { + log_trace!(self, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id())); if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } @@ -2638,7 +2768,7 @@ impl ChainListener for ChannelManager { self.finish_force_close_channel(failure); } self.latest_block_height.store(height as usize, Ordering::Release); - *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash(); + *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash; } /// We force-close the channel without letting our counterparty participate in the shutdown @@ -3247,8 +3377,9 @@ mod tests { use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor}; use chain::keysinterface; use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC}; - use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder}; + use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder, PaymentPreimage, PaymentHash}; use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor}; + use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT}; use ln::router::{Route, RouteHop, Router}; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; @@ -3281,7 +3412,7 @@ mod tests { use rand::{thread_rng,Rng}; use std::cell::RefCell; - use std::collections::{BTreeSet, HashMap}; + use std::collections::{BTreeSet, HashMap, HashSet}; use 
std::default::Default; use std::rc::Rc; use std::sync::{Arc, Mutex}; @@ -3409,7 +3540,7 @@ mod tests { }, ); - let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]); + let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &PaymentHash([0x42; 32])); // Just check the final packet encoding, as it includes all the per-hop vectors in it // anyway... assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap()); @@ -3805,6 +3936,12 @@ mod tests { _ => panic!("Unexpected event type!"), } } + + fn from_node(node: &Node) -> SendEvent { + let mut events = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.pop().unwrap()) + } } macro_rules! 
check_added_monitors { @@ -3827,7 +3964,7 @@ mod tests { commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false); } }; - ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => { { let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_b, 0); @@ -3852,6 +3989,23 @@ mod tests { assert!($node_a.node.get_and_clear_pending_events().is_empty()); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); } + (extra_msg_option, bs_revoke_and_ack) + } + }; + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => { + { + check_added_monitors!($node_a, 0); + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + check_added_monitors!($node_a, 1); + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); + assert!(extra_msg_option.is_none()); + bs_revoke_and_ack + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + { + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); { let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); @@ -3890,18 +4044,18 @@ mod tests { macro_rules! 
get_payment_preimage_hash { ($node: expr) => { { - let payment_preimage = [*$node.network_payment_count.borrow(); 32]; + let payment_preimage = PaymentPreimage([*$node.network_payment_count.borrow(); 32]); *$node.network_payment_count.borrow_mut() += 1; - let mut payment_hash = [0; 32]; + let mut payment_hash = PaymentHash([0; 32]); let mut sha = Sha256::new(); - sha.input(&payment_preimage[..]); - sha.result(&mut payment_hash); + sha.input(&payment_preimage.0[..]); + sha.result(&mut payment_hash.0[..]); (payment_preimage, payment_hash) } } } - fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { + fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) { let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node); let mut payment_event = { @@ -3955,7 +4109,7 @@ mod tests { (our_payment_preimage, our_payment_hash) } - fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) { + fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: PaymentPreimage) { assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage)); check_added_monitors!(expected_route.last().unwrap(), 1); @@ -4040,13 +4194,13 @@ mod tests { } } - fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) { + fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: PaymentPreimage) { claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage); } const TEST_FINAL_CLTV: u32 = 32; - fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) { + fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) { let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap(); assert_eq!(route.hops.len(), expected_route.len()); for (node, hop) in expected_route.iter().zip(route.hops.iter()) { @@ -4077,7 +4231,7 @@ mod tests { claim_payment(&origin, expected_route, our_payment_preimage); } - fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) { + fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: PaymentHash) { assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, PaymentFailReason::PreimageUnknown)); check_added_monitors!(expected_route.last().unwrap(), 1); @@ -4142,7 +4296,7 @@ mod tests { } } - fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) { + fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: PaymentHash) { fail_payment_along_route(origin_node, expected_route, false, our_payment_hash); } @@ -4150,12 +4304,12 @@ mod tests { let mut nodes = Vec::new(); let mut rng = thread_rng(); let secp_ctx = Secp256k1::new(); - let logger: Arc = Arc::new(test_utils::TestLogger::new()); let chan_count = Rc::new(RefCell::new(0)); let payment_count = Rc::new(RefCell::new(0)); - for _ in 0..node_count { + for i in 0..node_count { + let logger: Arc = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i))); let feeest = 
Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }); let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger))); let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())}); @@ -4897,56 +5051,10 @@ mod tests { assert!(nodes[2].node.list_channels().is_empty()); } - #[test] - fn update_fee_async_shutdown() { - // Test update_fee works after shutdown start if messages are delivered out-of-order - let nodes = create_network(2); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate(); - nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fulfill_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_some()); - - nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); - // Note that we don't actually test normative behavior here. The spec indicates we could - // actually send a closing_signed here, but is kinda unclear and could possibly be amended - // to require waiting on the full commitment dance before doing so (see - // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid - // ambiguity, we should wait until after the full commitment dance to send closing_signed. 
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); - let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true); - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - msg - }, - _ => panic!("Unexpected event"), - }).unwrap(); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - } - - fn do_test_shutdown_rebroadcast(recv_count: u8) { - // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of - // messages delivered prior to disconnect - let nodes = create_network(3); + fn do_test_shutdown_rebroadcast(recv_count: u8) { + // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of + // messages delivered prior to disconnect + let nodes = create_network(3); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -5286,7 +5394,10 @@ mod tests { false } else { true } }); - assert_eq!(res.len(), 2); + assert!(res.len() == 2 || res.len() == 3); + if res.len() == 3 { + assert_eq!(res[1], res[2]); + } } assert!(node_txn.is_empty()); @@ -5369,8 +5480,7 @@ mod tests { }} } - #[test] - fn channel_reserve_test() { + fn do_channel_reserve_test(test_recv: bool) { use util::rng; use std::sync::atomic::Ordering; use ln::msgs::HandleError; @@ -5527,9 +5637,23 @@ mod tests { onion_routing_packet: onion_packet, }; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); - match err { - HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + if test_recv { + let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); + match err { + HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + } + // If we send a garbage message, the channel should get closed, making the rest of this test case fail. 
+ assert_eq!(nodes[1].node.list_channels().len(), 1); + assert_eq!(nodes[1].node.list_channels().len(), 1); + let channel_close_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(channel_close_broadcast.len(), 1); + match channel_close_broadcast[0] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + }, + _ => panic!("Unexpected event"), + } + return; } } @@ -5637,6 +5761,12 @@ mod tests { assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22); } + #[test] + fn channel_reserve_test() { + do_channel_reserve_test(false); + do_channel_reserve_test(true); + } + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -5782,7 +5912,7 @@ mod tests { assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present assert_eq!(revoked_local_txn[1].input.len(), 1); assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout + assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout // Revoke the old state claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); @@ -5892,7 +6022,7 @@ mod tests { send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); // node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; + let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000); // Get the will-be-revoked local txn from node[0] let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); @@ -5901,7 +6031,7 @@ mod tests { assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid()); assert_eq!(revoked_local_txn[1].input.len(), 1); assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout + assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone()); //Revoke the old state @@ -5909,10 +6039,18 @@ mod tests { { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); - nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { payment_hash, .. 
} => { + assert_eq!(payment_hash, payment_hash_2); + }, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 4); @@ -5927,8 +6065,8 @@ mod tests { witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len()); assert_eq!(witness_lens.len(), 3); assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC - assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC + assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC // Next nodes[1] broadcasts its current local tx state: assert_eq!(node_txn[1].input.len(), 1); @@ -5936,7 +6074,7 @@ mod tests { assert_eq!(node_txn[2].input.len(), 1); let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap(); - assert_eq!(witness_script.len(), 133); //Spending an offered htlc output + assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid()); assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid); assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid); @@ -5958,7 +6096,7 @@ mod tests { // node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx, but this // time as two different claim transactions as we're gonna to timeout htlc with given a high current height let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; + let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000); // Get the will-be-revoked local txn from node[0] let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); @@ -5968,10 +6106,18 @@ mod tests { { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); - nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { payment_hash, .. 
} => { + assert_eq!(payment_hash, payment_hash_2); + }, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 12); // ChannelManager : 2, ChannelMontitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) @@ -5999,15 +6145,15 @@ mod tests { witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len()); assert_eq!(witness_lens.len(), 3); assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC - assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC + assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC assert_eq!(node_txn[3].input.len(), 1); check_spends!(node_txn[3], chan_1.3.clone()); assert_eq!(node_txn[4].input.len(), 1); let witness_script = node_txn[4].input[0].witness.last().unwrap(); - assert_eq!(witness_script.len(), 133); //Spending an offered htlc output + assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid()); assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid); assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid); @@ -6018,129 +6164,684 @@ mod tests { } #[test] - fn test_htlc_ignore_latest_remote_commitment() { - // Test that HTLC transactions spending the latest remote commitment transaction are simply - // ignored if we cannot claim them. This originally tickled an invalid unwrap(). - let nodes = create_network(2); - create_announced_chan_between_nodes(&nodes, 0, 1); + fn test_htlc_on_chain_success() { + // Test that in case of an unilateral close onchain, we detect the state of output thanks to + // ChainWatchInterface and pass the preimage backward accordingly. So here we test that ChannelManager is + // broadcasting the right event to other nodes in payment path. + // A --------------------> B ----------------------> C (preimage) + // First, C should claim the HTLC output via HTLC-Success when its own latest local + // commitment transaction was broadcast. + // Then, B should learn the preimage from said transactions, attempting to claim backwards + // towards B. + // B should be able to claim via preimage if A then broadcasts its local tx. + // Finally, when A sees B's latest local commitment transaction it should be able to claim + // the HTLC output via the preimage it learned (which, once confirmed should generate a + // PaymentSent event). - route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id); - { - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. 
} } => { - assert_eq!(flags & 0b10, 0b10); - }, - _ => panic!("Unexpected event"), - } - } + let nodes = create_network(3); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 2); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + // Rebalance the network a bit by relaying one payment through all the channels... + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + + let (our_payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + + // Broadcast legit commitment tx from C on B's chain + // Broadcast HTLC Success transation by C on received output from C's commitment tx on B's chain + let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(commitment_tx.len(), 1); + check_spends!(commitment_tx[0], chan_2.3.clone()); + nodes[2].node.claim_funds(our_payment_preimage); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 2 (2 * HTLC-Success tx) + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn[1], commitment_tx[0]); + assert_eq!(node_txn[0], node_txn[2]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_eq!(node_txn[0].lock_time, 0); + // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn}, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); { - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. 
} } => { - assert_eq!(flags & 0b10, 0b10); - }, - _ => panic!("Unexpected event"), - } + let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 1); + assert_eq!(added_monitors[0].0.txid, chan_1.3.txid()); + added_monitors.clear(); + } + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + }, + _ => panic!("Unexpected event"), + }; + { + // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate + // commitment transaction with a corresponding HTLC-Timeout transaction, as well as a + // timeout-claim of the output that nodes[2] just claimed via success. + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (timeout tx) * 2 (block-rescan) + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[0], node_txn[3]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_ne!(node_txn[0].lock_time, 0); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + check_spends!(node_txn[1], chan_2.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(node_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_ne!(node_txn[2].lock_time, 0); + node_txn.clear(); + } + + // Broadcast legit commitment tx from A on B's chain + // Broadcast preimage tx by B on offered output from A commitment tx on A's chain + let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_1.3.clone()); + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), } + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan) + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn[0], node_txn[2]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!(node_txn[0].lock_time, 0); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + check_spends!(node_txn[1], chan_1.3.clone()); + assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + // We don't bother to check that B can claim the HTLC output on its commitment tx here as + // we already checked the same situation with A. - // Duplicate the block_connected call since this may happen due to other listeners - // registering new transactions - nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { payment_preimage } => { + assert_eq!(payment_preimage, our_payment_preimage); + }, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (HTLC-Timeout tx) * 2 (block-rescan) + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[0], node_txn[3]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_ne!(node_txn[0].lock_time, 0); + assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + check_spends!(node_txn[1], chan_1.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(node_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_ne!(node_txn[2].lock_time, 0); } #[test] - fn test_force_close_fail_back() { - // Check which HTLCs are failed-backwards on channel force-closure - let mut nodes = create_network(3); - create_announced_chan_between_nodes(&nodes, 0, 1); - create_announced_chan_between_nodes(&nodes, 1, 2); - - let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap(); + fn test_htlc_on_chain_timeout() { + // Test that in case of a unilateral close on-chain, we detect the state of output thanks to + ChainWatchInterface and time out the HTLC backward accordingly. So here we test that ChannelManager is + broadcasting the right event to other nodes in payment path.
+ // A ------------------> B ----------------------> C (timeout) + // B's commitment tx C's commitment tx + // \ \ + // B's HTLC timeout tx B's timeout tx - let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + let nodes = create_network(3); - let mut payment_event = { - nodes[0].node.send_payment(route, our_payment_hash).unwrap(); - check_added_monitors!(nodes[0], 1); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; + // Rebalance the network a bit by relaying one payment through all the channels... + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + let (_payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; - let events_1 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::PendingHTLCsForwardable { .. } => { }, + // Broadcast legit commitment tx from C on B's chain + let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_2.3.clone()); + nodes[2].node.fail_htlc_backwards(&payment_hash, PaymentFailReason::PreimageUnknown); + { + let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 1); + added_monitors.clear(); + } + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(!update_fail_htlcs.is_empty()); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + }, _ => panic!("Unexpected event"), }; + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { ..
} } => {}, + _ => panic!("Unexpected event"), + } + let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx) + assert_eq!(node_txn.len(), 1); + check_spends!(node_txn[0], chan_2.3.clone()); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71); - nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); - nodes[1].node.process_pending_htlc_forwards(); - - let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_2.len(), 1); - payment_event = SendEvent::from_event(events_2.remove(0)); - assert_eq!(payment_event.msgs.len(), 1); - + // Broadcast timeout transaction by B on received output fron C's commitment tx on B's chain + // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200); + let timeout_tx; + { + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 8); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 6 (HTLC-Timeout tx, commitment tx, timeout tx) * 2 (block-rescan) + assert_eq!(node_txn[0], node_txn[5]); + assert_eq!(node_txn[1], node_txn[6]); + assert_eq!(node_txn[2], node_txn[7]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[1], chan_2.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71); + assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[3], chan_2.3.clone()); + check_spends!(node_txn[4], node_txn[3].clone()); + assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(node_txn[4].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + timeout_tx = node_txn[0].clone(); + node_txn.clear(); + } + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); check_added_monitors!(nodes[1], 1); - nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); - nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); - check_added_monitors!(nodes[2], 1); - let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - - // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous - // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC - // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). - - nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id); - let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(events_3.len(), 1); - match events_3[0] { - MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { - assert_eq!(flags & 0b10, 0b10); - }, + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. 
} } => {}, _ => panic!("Unexpected event"), } - - let tx = { - let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't - // have a use for it unless nodes[2] learns the preimage somehow, the funds will go - // back to nodes[1] upon timeout otherwise. - assert_eq!(node_txn.len(), 1); - node_txn.remove(0) + match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(!update_fail_htlcs.is_empty()); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + }, + _ => panic!("Unexpected event"), }; + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // Well... here we detect our own htlc_timeout_tx so no tx to be generated + assert_eq!(node_txn.len(), 0); - let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]); + // Broadcast legit commitment tx from B on A's chain + let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_1.3.clone()); - let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); - // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! - assert_eq!(events_4.len(), 1); - match events_4[0] { - MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { - assert_eq!(flags & 0b10, 0b10); - }, + nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {}, _ => panic!("Unexpected event"), } - - // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. - { + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 block-rescan + assert_eq!(node_txn.len(), 4); + assert_eq!(node_txn[0], node_txn[3]); + check_spends!(node_txn[0], commitment_tx[0].clone()); + assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[1], chan_1.3.clone()); + check_spends!(node_txn[2], node_txn[1].clone()); + assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71); + assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + } + + #[test] + fn test_simple_commitment_revoked_fail_backward() { + // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx + // and fail backward accordingly. 
+ + let nodes = create_network(3); + + // Create some initial channels + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + // Get the will-be-revoked local txn from nodes[2] + let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + // Revoke the old state + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + check_added_monitors!(nodes[1], 1); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {}, + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fail_htlcs.len(), 1); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { .. } => {}, + _ => panic!("Unexpected event"), + } + }, + _ => panic!("Unexpected event"), + } + } + + fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) { + // Test that if our counterparty broadcasts a revoked commitment transaction we fail all + // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest + // commitment transaction anymore. + // To do this, we have the peer which will broadcast a revoked commitment transaction send + // a number of update_fail/commitment_signed updates without ever sending the RAA in + // response to our commitment_signed. This is somewhat misbehavior-y, though not + // technically disallowed and we should probably handle it reasonably. + // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet + // failed/fulfilled backwards must be in at least one of the latest two remote commitment + // transactions: + // * Once we move it out of our holding cell/add it, we will immediately include it in a + // commitment_signed (implying it will be in the latest remote commitment transaction). 
+ // * Once they remove it, we will send a (the first) commitment_signed without the HTLC, + // and once they revoke the previous commitment transaction (allowing us to send a new + // commitment_signed) we will be free to fail/fulfill the HTLC backwards. + let mut nodes = create_network(3); + + // Create some initial channels + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + // Get the will-be-revoked local txn from nodes[2] + let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + // Revoke the old state + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let (_, first_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + + assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + // Drop the last RAA from 3 -> 2 + + assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + // Note that nodes[1] is in AwaitingRAA, so won't send a CS + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + + assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + // At this point first_payment_hash has dropped out of the 
latest two commitment + // transactions that nodes[1] is tracking... + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + + // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting + // on nodes[2]'s RAA. + let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (_, fourth_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[1].node.send_payment(route, fourth_payment_hash).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + check_added_monitors!(nodes[1], 0); + + if deliver_bs_raa { + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap(); + // One monitor for the new revocation preimage, one as we generate a commitment for + // nodes[0] to fail first_payment_hash backwards. + check_added_monitors!(nodes[1], 2); + } + + let mut failed_htlcs = HashSet::new(); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert_eq!(*payment_hash, fourth_payment_hash); + }, + _ => panic!("Unexpected event"), + } + + if !deliver_bs_raa { + // If we delivered the RAA already then we already failed first_payment_hash backwards. + check_added_monitors!(nodes[1], 1); + } + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 }); + match events[if deliver_bs_raa { 2 } else { 0 }] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {}, + _ => panic!("Unexpected event"), + } + if deliver_bs_raa { + match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert_eq!(nodes[2].node.get_our_node_id(), *node_id); + assert_eq!(update_add_htlcs.len(), 1); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + }, + _ => panic!("Unexpected event"), + } + } + // Due to the way backwards-failing occurs we do the updates in two steps. + let updates = match events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. 
} } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fail_htlcs.len(), 1); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap(); + check_added_monitors!(nodes[0], 1); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[1], 1); + let bs_second_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[0], 1); + + if !deliver_bs_raa { + // If we delivered B's RAA we got an unknown preimage error, not something + // that we should update our routing table for. + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + } + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert!(failed_htlcs.insert(payment_hash.0)); + }, + _ => panic!("Unexpected event"), + } + + bs_second_update + }, + _ => panic!("Unexpected event"), + }; + + assert!(updates.update_add_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 2); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for event in events { + match event { + MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + } + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentFailed { ref payment_hash, .. } => { + assert!(failed_htlcs.insert(payment_hash.0)); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentFailed { ref payment_hash, ..
} => { + assert!(failed_htlcs.insert(payment_hash.0)); + }, + _ => panic!("Unexpected event"), + } + + assert!(failed_htlcs.contains(&first_payment_hash.0)); + assert!(failed_htlcs.contains(&second_payment_hash.0)); + assert!(failed_htlcs.contains(&third_payment_hash.0)); + } + + #[test] + fn test_commitment_revoked_fail_backward_exhaustive() { + do_test_commitment_revoked_fail_backward_exhaustive(false); + do_test_commitment_revoked_fail_backward_exhaustive(true); + } + + #[test] + fn test_htlc_ignore_latest_remote_commitment() { + // Test that HTLC transactions spending the latest remote commitment transaction are simply + // ignored if we cannot claim them. This originally tickled an invalid unwrap(). + let nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + route_payment(&nodes[0], &[&nodes[1]], 10000000); + nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + } + + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 2); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + + { + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + } + + // Duplicate the block_connected call since this may happen due to other listeners + // registering new transactions + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]); + } + + #[test] + fn test_force_close_fail_back() { + // Check which HTLCs are failed-backwards on channel force-closure + let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); + + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap(); + + let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + + let mut payment_event = { + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + payment_event = SendEvent::from_event(events_2.remove(0)); + assert_eq!(payment_event.msgs.len(), 1); + + check_added_monitors!(nodes[1], 1); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap(); + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[2], 1); + let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + + // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous + // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC + // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). + + nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id); + let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events_3.len(), 1); + match events_3[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + + let tx = { + let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); + // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't + // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will + // go back to nodes[1] upon timeout. + assert_eq!(node_txn.len(), 1); + node_txn.remove(0) + }; + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]); + + let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); + // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! + assert_eq!(events_4.len(), 1); + match events_4[0] { + MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => { + assert_eq!(flags & 0b10, 0b10); + }, + _ => panic!("Unexpected event"), + } + + // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
+ { let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap(); monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap() .provide_payment_preimage(&our_payment_hash, &our_payment_preimage); @@ -6272,6 +6973,31 @@ mod tests { node_b.node.peer_connected(&node_a.node.get_our_node_id()); let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); + if send_funding_locked.0 { + // If a expects a funding_locked, it better not think it has received a revoke_and_ack + // from b + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.1 { + // If b expects a funding_locked, it better not think it has received a revoke_and_ack + // from a + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.0 || send_funding_locked.1 { + // If we expect any funding_locked's, both sides better have set + // next_local_commitment_number to 1 + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + } + let mut resp_1 = Vec::new(); for msg in reestablish_1 { node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap(); @@ -6878,15 +7604,19 @@ mod tests { let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } check_added_monitors!(nodes[0], 1); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); + assert_eq!(events_1.len(), 2); match events_1[0] { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), }; + match events_1[1] { + MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()), + _ => panic!("Unexpected event"), + }; // TODO: Once we hit the chain with the failure transaction we should check that we get a // PaymentFailed event @@ -7200,129 +7930,559 @@ mod tests { check_added_monitors!(nodes[0], 1); } } - macro_rules! handle_initial_raa { () => { - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap(); - bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fee.is_none()); + macro_rules! 
handle_initial_raa { () => { + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap(); + bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + } } + + if (disconnect_count & 8) == 0 { + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.is_none()); + + assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } else { + handle_initial_raa!(); + + if disconnect_count & !disconnect_flags > 3 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + + assert!(as_resp.2.is_none()); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + + assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + } + + handle_bs_raa!(); + + if disconnect_count & !disconnect_flags > 4 { + let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + + assert!(as_resp.1.is_none()); + assert!(bs_resp.1.is_none()); + + assert!(as_resp.2.unwrap() == as_commitment_update); + assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + } + } + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap(); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap(); + let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + + let events_5 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PaymentReceived { ref payment_hash, amt } => { + assert_eq!(payment_hash_2, *payment_hash); + 
assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); + } + + #[test] + fn test_monitor_temporary_update_fail_a() { + do_test_monitor_temporary_update_fail(0); + do_test_monitor_temporary_update_fail(1); + do_test_monitor_temporary_update_fail(2); + do_test_monitor_temporary_update_fail(3); + do_test_monitor_temporary_update_fail(4); + do_test_monitor_temporary_update_fail(5); + } + + #[test] + fn test_monitor_temporary_update_fail_b() { + do_test_monitor_temporary_update_fail(2 | 8); + do_test_monitor_temporary_update_fail(3 | 8); + do_test_monitor_temporary_update_fail(4 | 8); + do_test_monitor_temporary_update_fail(5 | 8); + } + + #[test] + fn test_monitor_temporary_update_fail_c() { + do_test_monitor_temporary_update_fail(1 | 16); + do_test_monitor_temporary_update_fail(2 | 16); + do_test_monitor_temporary_update_fail(3 | 16); + do_test_monitor_temporary_update_fail(2 | 8 | 16); + do_test_monitor_temporary_update_fail(3 | 8 | 16); + } + + #[test] + fn test_monitor_update_fail_cs() { + // Tests handling of a monitor update failure when processing an incoming commitment_signed + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + + let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); + let responses = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(responses.len(), 2); + + match responses[0] { + MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap(); + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + match responses[1] { + MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => { + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { + 
assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + }, + _ => panic!("Unexpected event"), + } + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap(); + check_added_monitors!(nodes[1], 1); + + let mut events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { payment_hash, amt } => { + assert_eq!(payment_hash, our_payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + } + + fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { + // Tests handling of a monitor update failure when processing an incoming RAA + let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); + + // Rebalance a bit so that we can send backwards from 2 to 1. + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); + + // Route a first payment that we'll fail backwards + let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + + // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA + assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + + let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + check_added_monitors!(nodes[0], 0); + + // While the second channel is AwaitingRAA, forward a second payment to get it into the + // holding cell. 
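+ // (nodes[1] has not yet been handed nodes[2]'s revoke_and_ack from the fail-back above, so the forwarded HTLC lands in that channel's holding cell rather than generating any messages, hence the empty msg_events check below.)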
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(route, payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Now fail monitor updating. + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + + // Attempt to forward a third payment but fail due to the second channel being unavailable + // for forwarding. + + let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(route, payment_hash_3).unwrap(); + check_added_monitors!(nodes[0], 1); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel + send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); + check_added_monitors!(nodes[1], 0); + + let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + match events_2.remove(0) { + MessageSendEvent::UpdateHTLCs { node_id, updates } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events[0] { + assert_eq!(payment_hash, payment_hash_3); + assert!(!rejected_by_dest); + } else { panic!("Unexpected event!"); } + }, + _ => panic!("Unexpected event type!"), + }; + + let 
(payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { + // Try to route another payment backwards from 2 to make sure 1 holds off on responding + let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[2].node.send_payment(route, payment_hash_4).unwrap(); + check_added_monitors!(nodes[2], 1); + + send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { + assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); + } else { panic!(); } + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + (Some(payment_preimage_4), Some(payment_hash_4)) + } else { (None, None) }; + + // Restore monitor updating, ensuring we immediately get a fail-back update and an + // update_add update. + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 2); + + let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); + if test_ignore_second_cs { + assert_eq!(events_3.len(), 3); + } else { + assert_eq!(events_3.len(), 2); + } + + // Note that the ordering of the events for different nodes is non-prescriptive, though the + // ordering of the two events that both go to nodes[2] has to be preserved. + let messages_a = match events_3.pop().unwrap() { + MessageSendEvent::UpdateHTLCs { node_id, mut updates } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + (updates.update_fail_htlcs.remove(0), updates.commitment_signed) + }, + _ => panic!("Unexpected event type!"), + }; + let raa = if test_ignore_second_cs { + match events_3.remove(1) { + MessageSendEvent::SendRevokeAndACK { node_id, msg } => { + assert_eq!(node_id, nodes[2].node.get_our_node_id()); + Some(msg.clone()) + }, + _ => panic!("Unexpected event"), + } + } else { None }; + let send_event_b = SendEvent::from_event(events_3.remove(0)); + assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id()); + + // Now deliver the new messages...
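+ // (The messages being delivered here: the fail-back commitment update destined for nodes[0], the forwarded update_add destined for nodes[2], and, when test_ignore_second_cs is set, the revoke_and_ack for nodes[2] that was held back above.)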
+ + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false); + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events_4[0] { + assert_eq!(payment_hash, payment_hash_1); + assert!(rejected_by_dest); + } else { panic!("Unexpected event!"); } + + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap(); + if test_ignore_second_cs { + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(bs_cs.update_add_htlcs.is_empty()); + assert!(bs_cs.update_fail_htlcs.is_empty()); + assert!(bs_cs.update_fail_malformed_htlcs.is_empty()); + assert!(bs_cs.update_fulfill_htlcs.is_empty()); + assert!(bs_cs.update_fee.is_none()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[1], 1); + let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + assert!(as_cs.update_add_htlcs.is_empty()); + assert!(as_cs.update_fail_htlcs.is_empty()); + assert!(as_cs.update_fail_malformed_htlcs.is_empty()); + assert!(as_cs.update_fulfill_htlcs.is_empty()); + assert!(as_cs.update_fee.is_none()); + + nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap(); check_added_monitors!(nodes[1], 1); - } } + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } else { + commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false); + } - if (disconnect_count & 8) == 0 { - handle_bs_raa!(); + let events_5 = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; - if disconnect_count & !disconnect_flags > 3 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[2].node.process_pending_htlc_forwards(); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); - assert!(bs_resp.1.is_none()); + let events_6 = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events_6.len(), 1); + match events_6[0] { + Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, + _ => panic!("Unexpected event"), + }; - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.is_none()); + if test_ignore_second_cs { + let events_7 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_7.len(), 1); + match events_7[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; - assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); - } + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 1); - handle_initial_raa!(); + send_event = SendEvent::from_node(&nodes[1]); + assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(send_event.msgs.len(), 1); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false); - if disconnect_count & !disconnect_flags > 4 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + let events_8 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_8.len(), 1); + match events_8[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; - assert!(as_resp.1.is_none()); - assert!(bs_resp.1.is_none()); + nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[0].node.process_pending_htlc_forwards(); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); - } - } else { - handle_initial_raa!(); + let events_9 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_9.len(), 1); + match events_9[0] { + Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()), + _ => panic!("Unexpected event"), + }; + claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap()); + } - if disconnect_count & !disconnect_flags > 3 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); + } - assert!(as_resp.1.is_none()); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + #[test] + fn test_monitor_update_fail_raa() { + do_test_monitor_update_fail_raa(false); + do_test_monitor_update_fail_raa(true); + } - assert!(as_resp.2.is_none()); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + #[test] + fn test_monitor_update_fail_reestablish() { + // Simple test for message retransmission after monitor update failure on + // channel_reestablish generating a monitor update (which comes from freeing holding cell + // HTLCs). 
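+ // Concretely: nodes[2] claims the payment while nodes[0] and nodes[1] are disconnected, so the fulfill destined for nodes[0] sits in nodes[1]'s holding cell; the monitor update that frees it on reconnect is the one forced to fail below.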
+ let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); - assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); - } + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); - handle_bs_raa!(); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - if disconnect_count & !disconnect_flags > 4 { - let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - assert!(as_resp.1.is_none()); - assert!(bs_resp.1.is_none()); + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); - } - } + let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap(); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap(); - let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), 
&bs_second_revoke_and_ack).unwrap(); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); - expect_pending_htlcs_forwardable!(nodes[1]); + assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id())); + assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); - let events_5 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_5.len(), 1); - match events_5[0] { - Event::PaymentReceived { ref payment_hash, amt } => { - assert_eq!(payment_hash_2, *payment_hash); - assert_eq!(amt, 1000000); - }, - _ => panic!("Unexpected event"), - } + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); - } + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap(); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - #[test] - fn test_monitor_temporary_update_fail_a() { - do_test_monitor_temporary_update_fail(0); - do_test_monitor_temporary_update_fail(1); - do_test_monitor_temporary_update_fail(2); - do_test_monitor_temporary_update_fail(3); - do_test_monitor_temporary_update_fail(4); - do_test_monitor_temporary_update_fail(5); - } + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); - #[test] - fn test_monitor_temporary_update_fail_b() { - do_test_monitor_temporary_update_fail(2 | 8); - do_test_monitor_temporary_update_fail(3 | 8); - do_test_monitor_temporary_update_fail(4 | 8); - do_test_monitor_temporary_update_fail(5 | 8); - } + updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); - #[test] - fn test_monitor_temporary_update_fail_c() { - do_test_monitor_temporary_update_fail(1 | 16); - do_test_monitor_temporary_update_fail(2 | 16); - do_test_monitor_temporary_update_fail(3 | 16); - do_test_monitor_temporary_update_fail(2 | 8 | 16); - do_test_monitor_temporary_update_fail(3 | 8 | 16); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { payment_preimage, .. 
} => assert_eq!(payment_preimage, our_payment_preimage), + _ => panic!("Unexpected event"), + } } #[test] @@ -7828,7 +8988,7 @@ mod tests { let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 1 (local commitment tx), ChannelMonitor: 2 (1 preimage tx) * 2 (block-rescan) check_spends!(node_txn[0], commitment_tx[0].clone()); assert_eq!(node_txn[0], node_txn[2]); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 133); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); check_spends!(node_txn[1], chan_1.3.clone()); let spend_txn = check_spendable_outputs!(nodes[1], 1); // , 0, 0, 1, 1); @@ -7893,10 +9053,12 @@ mod tests { _ => panic!("Unexpected event"), } let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(revoked_htlc_txn.len(), 2); + assert_eq!(revoked_htlc_txn.len(), 3); + assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]); assert_eq!(revoked_htlc_txn[0].input.len(), 1); - assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 133); + assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone()); + check_spends!(revoked_htlc_txn[1], chan_1.3.clone()); // B will generate justice tx from A's revoked commitment/HTLC tx nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1); @@ -7943,9 +9105,10 @@ mod tests { } let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(revoked_htlc_txn.len(), 2); + assert_eq!(revoked_htlc_txn.len(), 3); + assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]); assert_eq!(revoked_htlc_txn[0].input.len(), 1); - assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 138); + assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone()); // A will generate justice tx from B's revoked commitment/HTLC tx @@ -7971,6 +9134,226 @@ mod tests { check_spends!(spend_txn[4], node_txn[3].clone()); // spending justice tx output on htlc success tx } + #[test] + fn test_onchain_to_onchain_claim() { + // Test that in case of channel closure, we detect the state of output thanks to + // ChainWatchInterface and claim HTLC on downstream peer's remote commitment tx. + // First, have C claim an HTLC against its own latest commitment transaction. + // Then, broadcast these to B, which should update the monitor downstream on the A<->B + // channel. + // Finally, check that B will claim the HTLC output if A's latest commitment transaction + // gets broadcast. + + let nodes = create_network(3); + + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Rebalance the network a bit by relaying one payment through all the channels ... 
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + + let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; + let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + check_spends!(commitment_tx[0], chan_2.3.clone()); + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + + let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx) + assert_eq!(c_txn.len(), 3); + assert_eq!(c_txn[0], c_txn[2]); + assert_eq!(commitment_tx[0], c_txn[1]); + check_spends!(c_txn[1], chan_2.3.clone()); + check_spends!(c_txn[2], c_txn[1].clone()); + assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71); + assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_eq!(c_txn[0].lock_time, 0); // Success tx + + // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}, 1); + { + let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(b_txn.len(), 4); + assert_eq!(b_txn[0], b_txn[3]); + check_spends!(b_txn[1], chan_2.3); // B local commitment tx, issued by ChannelManager + check_spends!(b_txn[2], b_txn[1].clone()); // HTLC-Timeout on B local commitment tx, issued by ChannelManager + assert_eq!(b_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(b_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output + assert_ne!(b_txn[2].lock_time, 0); // Timeout tx + check_spends!(b_txn[0], c_txn[1].clone()); // timeout tx on C remote commitment tx, issued by ChannelMonitor, * 2 due to block rescan + assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert_ne!(b_txn[2].lock_time, 0); // Timeout tx + b_txn.clear(); + } + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + check_added_monitors!(nodes[1], 1); + match msg_events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + } + match msg_events[1] { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_malformed_htlcs.is_empty()); + assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + }, + _ => panic!("Unexpected event"), + }; + // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx + let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone(); + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1); + let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(b_txn.len(), 3); + check_spends!(b_txn[1], chan_1.3); // Local commitment tx, issued by ChannelManager + assert_eq!(b_txn[0], b_txn[2]); // HTLC-Success tx, issued by ChannelMonitor, * 2 due to block rescan + check_spends!(b_txn[0], commitment_tx[0].clone()); + assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment + assert_eq!(b_txn[2].lock_time, 0); // Success tx + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + match msg_events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + } + + #[test] + fn test_duplicate_payment_hash_one_failure_one_success() { + // Topology : A --> B --> C + // We route 2 payments with the same hash between B and C; one will time out and the other will be claimed successfully + let mut nodes = create_network(3); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (our_payment_preimage, duplicate_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000); + *nodes[0].network_payment_count.borrow_mut() -= 1; + assert_eq!(route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000).1, duplicate_payment_hash); + + let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone(); + assert_eq!(commitment_txn[0].input.len(), 1); + check_spends!(commitment_txn[0], chan_2.3.clone()); + + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1); + let htlc_timeout_tx; + { // Extract one of the two HTLC-Timeout transactions + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 7); + assert_eq!(node_txn[0], node_txn[5]); + assert_eq!(node_txn[1], node_txn[6]); + check_spends!(node_txn[0], commitment_txn[0].clone()); + assert_eq!(node_txn[0].input.len(), 1); + check_spends!(node_txn[1], commitment_txn[0].clone()); + assert_eq!(node_txn[1].input.len(), 1); + assert_ne!(node_txn[0].input[0], node_txn[1].input[0]); + check_spends!(node_txn[2], chan_2.3.clone()); + check_spends!(node_txn[3], node_txn[2].clone()); + check_spends!(node_txn[4], node_txn[2].clone()); + htlc_timeout_tx = node_txn[1].clone(); + }
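+ // Both HTLCs use the same payment hash, so the two HTLC-Timeout transactions above can only be told apart by which commitment output they spend (hence the assert_ne! on their inputs); we keep one of them to fail that HTLC back to nodes[0] later.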
+ + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + + nodes[2].node.claim_funds(our_payment_preimage); + nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1); + check_added_monitors!(nodes[2], 2); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::UpdateHTLCs { .. } => {}, + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::BroadcastChannelUpdate { .. } => {}, + _ => panic!("Unexpected event"), + } + let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + assert_eq!(htlc_success_txn.len(), 5); + check_spends!(htlc_success_txn[2], chan_2.3.clone()); + assert_eq!(htlc_success_txn[0], htlc_success_txn[3]); + assert_eq!(htlc_success_txn[0].input.len(), 1); + assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!(htlc_success_txn[1], htlc_success_txn[4]); + assert_eq!(htlc_success_txn[1].input.len(), 1); + assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_ne!(htlc_success_txn[0].input[0], htlc_success_txn[1].input[0]); + check_spends!(htlc_success_txn[0], commitment_txn[0].clone()); + check_spends!(htlc_success_txn[1], commitment_txn[0].clone()); + + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200); + let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(htlc_updates.update_add_htlcs.is_empty()); + assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); + assert_eq!(htlc_updates.update_fail_htlcs[0].htlc_id, 1); + assert!(htlc_updates.update_fulfill_htlcs.is_empty()); + assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + { + commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelClosed { .. } } => { + }, + _ => { panic!("Unexpected event"); } + } + } + let events = nodes[0].node.get_and_clear_pending_events(); + match events[0] { + Event::PaymentFailed { ref payment_hash, ..
} => { + assert_eq!(*payment_hash, duplicate_payment_hash); + } + _ => panic!("Unexpected event"), + } + + // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C + nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert_eq!(updates.update_fulfill_htlcs[0].htlc_id, 0); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); + + let events = nodes[0].node.get_and_clear_pending_events(); + match events[0] { + Event::PaymentSent { ref payment_preimage } => { + assert_eq!(*payment_preimage, our_payment_preimage); + } + _ => panic!("Unexpected event"), + } + } + #[test] fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let nodes = create_network(2); @@ -7999,13 +9382,14 @@ mod tests { } let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 138); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); check_spends!(node_txn[0], local_txn[0].clone()); // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor let spend_txn = check_spendable_outputs!(nodes[1], 1); - assert_eq!(spend_txn.len(), 1); + assert_eq!(spend_txn.len(), 2); check_spends!(spend_txn[0], node_txn[0].clone()); + check_spends!(spend_txn[1], node_txn[2].clone()); } #[test] @@ -8030,14 +9414,18 @@ mod tests { } let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 133); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); check_spends!(node_txn[0], local_txn[0].clone()); // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor let spend_txn = check_spendable_outputs!(nodes[0], 1); - assert_eq!(spend_txn.len(), 4); + assert_eq!(spend_txn.len(), 8); assert_eq!(spend_txn[0], spend_txn[2]); + assert_eq!(spend_txn[0], spend_txn[4]); + assert_eq!(spend_txn[0], spend_txn[6]); assert_eq!(spend_txn[1], spend_txn[3]); + assert_eq!(spend_txn[1], spend_txn[5]); + assert_eq!(spend_txn[1], spend_txn[7]); check_spends!(spend_txn[0], local_txn[0].clone()); check_spends!(spend_txn[1], node_txn[0].clone()); }
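// For reference, a minimal sketch (not taken from this patch) of the witness-length constants the assertions above now use; the values simply mirror the literals they replace in this diff, and the exact definitions elsewhere in the crate are an assumption here:
// const OFFERED_HTLC_SCRIPT_WEIGHT: usize = 133;  // offered-HTLC witness script length asserted above
// const ACCEPTED_HTLC_SCRIPT_WEIGHT: usize = 138; // accepted-HTLC witness script length asserted above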