diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index ad8f70b0..52819526 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -90,7 +90,7 @@ mod channel_held_info {
 	}

 	/// Tracks the inbound corresponding to an outbound HTLC
-	#[derive(Clone)]
+	#[derive(Clone, PartialEq)]
 	pub struct HTLCPreviousHopData {
 		pub(super) short_channel_id: u64,
 		pub(super) htlc_id: u64,
@@ -98,7 +98,7 @@ mod channel_held_info {
 	}

 	/// Tracks the inbound corresponding to an outbound HTLC
-	#[derive(Clone)]
+	#[derive(Clone, PartialEq)]
 	pub enum HTLCSource {
 		PreviousHopData(HTLCPreviousHopData),
 		OutboundRoute {
@@ -135,9 +135,13 @@ pub(super) use self::channel_held_info::*;

 type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>);

+/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
+/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
+/// immediately (ie with no further calls on it made). Thus, this step happens inside a
+/// channel_state lock. We then return the set of things that need to be done outside the lock in
+/// this struct and call handle_error!() on it.
 struct MsgHandleErrInternal {
 	err: msgs::HandleError,
-	needs_channel_force_close: bool,
 	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
@@ -153,29 +157,12 @@ impl MsgHandleErrInternal {
 				},
 			}),
 		},
-			needs_channel_force_close: false,
-			shutdown_finish: None,
-		}
-	}
-	#[inline]
-	fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
-		Self {
-			err: HandleError {
-				err,
-				action: Some(msgs::ErrorAction::SendErrorMessage {
-					msg: msgs::ErrorMessage {
-						channel_id,
-						data: err.to_string()
-					},
-				}),
-			},
-			needs_channel_force_close: true,
 			shutdown_finish: None,
 		}
 	}
 	#[inline]
 	fn from_no_close(err: msgs::HandleError) -> Self {
-		Self { err, needs_channel_force_close: false, shutdown_finish: None }
+		Self { err, shutdown_finish: None }
 	}
 	#[inline]
 	fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
@@ -189,7 +176,6 @@ impl MsgHandleErrInternal {
 				},
 			}),
 		},
-			needs_channel_force_close: false,
 			shutdown_finish: Some((shutdown_res, channel_update)),
 		}
 	}
@@ -211,7 +197,6 @@ impl MsgHandleErrInternal {
 				}),
 			},
 		},
-			needs_channel_force_close: false,
 			shutdown_finish: None,
 		}
 	}
@@ -405,28 +390,7 @@ macro_rules!
handle_error { ($self: ident, $internal: expr, $their_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, needs_channel_force_close, shutdown_finish }) => { - if needs_channel_force_close { - match &err.action { - &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {}, - &Some(msgs::ErrorAction::IgnoreError) => {}, - &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &None => {}, - } - } + Err(MsgHandleErrInternal { err, shutdown_finish }) => { if let Some((shutdown_res, update_option)) = shutdown_finish { $self.finish_force_close_channel(shutdown_res); if let Some(update) = update_option { @@ -478,6 +442,61 @@ macro_rules! try_chan_entry { } } +macro_rules! return_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new()) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => { + if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); } + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + // TODO: $failed_fails is dropped here, which will cause other channels to hit the + // chain in a confused state! We need to move them into the ChannelMonitor which + // will be responsible for failing backwards once things confirm on-chain. + // It's ok that we drop $failed_forwards here - at this point we'd rather they + // broadcast HTLC-Timeout and pay the associated fees to get their funds back than + // us bother trying to claim it just to forward on to another peer. If we're + // splitting hairs we'd prefer to claim payments that were to us, but we haven't + // given up the preimage yet, so might as well just wait until the payment is + // retried, avoiding the on-chain fees. + return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs); + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())); + }, + } + } +} + +// Does not break in case of TemporaryFailure! +macro_rules! 
maybe_break_monitor_err {
+	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
+		match $err {
+			ChannelMonitorUpdateErr::PermanentFailure => {
+				let (channel_id, mut chan) = $entry.remove_entry();
+				if let Some(short_id) = chan.get_short_channel_id() {
+					$channel_state.short_to_id.remove(&short_id);
+				}
+				break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+			},
+			ChannelMonitorUpdateErr::TemporaryFailure => {
+				$entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false);
+			},
+		}
+	}
+}
+
 impl ChannelManager {
 	/// Constructs a new ChannelManager to hold several channels and route between them.
 	///
@@ -660,13 +679,6 @@ impl ChannelManager {
 		for tx in local_txn {
 			self.tx_broadcaster.broadcast_transaction(&tx);
 		}
-		//TODO: We need to have a way where outbound HTLC claims can result in us claiming the
-		//now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
-		//TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
-		//may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
-		//timeouts are hit and our claims confirm).
-		//TODO: In any case, we need to make sure we remove any pending htlc tracking (via
-		//fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
 	}

 	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
@@ -703,33 +715,6 @@ impl ChannelManager {
 		}
 	}

-	fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
-		match err {
-			ChannelMonitorUpdateErr::PermanentFailure => {
-				let mut chan = {
-					let channel_state = channel_state_lock.borrow_parts();
-					let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-					if let Some(short_id) = chan.get_short_channel_id() {
-						channel_state.short_to_id.remove(&short_id);
-					}
-					chan
-				};
-				mem::drop(channel_state_lock);
-				self.finish_force_close_channel(chan.force_shutdown());
-				if let Ok(update) = self.get_channel_update(&chan) {
-					let mut channel_state = self.channel_state.lock().unwrap();
-					channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-						msg: update
-					});
-				}
-			},
-			ChannelMonitorUpdateErr::TemporaryFailure => {
-				let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-				channel.monitor_update_failed(reason);
-			},
-		}
-	}
-
 	#[inline]
 	fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
 		assert_eq!(shared_secret.len(), 32);
@@ -1229,7 +1214,17 @@ impl ChannelManager {
 	/// May generate a SendHTLCs message event on success, which should be relayed.
 	///
 	/// Raises APIError::RouteError when invalid route or forward parameter
-	/// (cltv_delta, fee, node public key) is specified
+	/// (cltv_delta, fee, node public key) is specified.
+	/// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
+	/// (including due to previous monitor update failure or new permanent monitor update failure).
+	/// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+	/// relevant updates.
+	///
+	/// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
+	/// and you may wish to retry via a different route immediately.
+	/// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
+	/// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
+	/// the payment via a different route unless you intend to pay twice!
 	pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
 		if route.hops.len() < 1 || route.hops.len() > 20 {
 			return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
@@ -1241,11 +1236,7 @@ impl ChannelManager {
 			}
 		}

-		let session_priv = SecretKey::from_slice(&self.secp_ctx, &{
-			let mut session_key = [0; 32];
-			rng::fill_bytes(&mut session_key);
-			session_key
-		}).expect("RNG is bad!");
+		let session_priv = self.keys_manager.get_session_key();

 		let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;

@@ -1264,45 +1255,46 @@ impl ChannelManager {
 			Some(id) => id.clone(),
 		};

-		match {
-			let channel_state = channel_lock.borrow_parts();
-			if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+		let channel_state = channel_lock.borrow_parts();
+		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+			match {
 				if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
 					return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
 				}
-				if chan.get().is_awaiting_monitor_update() {
-					return Err(APIError::MonitorUpdateFailed);
-				}
 				if !chan.get().is_live() {
-					return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+					return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
 				}
 				break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
 					route: route.clone(),
 					session_priv: session_priv.clone(),
 					first_hop_htlc_msat: htlc_msat,
 				}, onion_packet), channel_state, chan)
-			} else { unreachable!(); }
-		} {
-			Some((update_add, commitment_signed, chan_monitor)) => {
-				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-					self.handle_monitor_update_fail(channel_lock, &id, e, RAACommitmentOrder::CommitmentFirst);
-					return Err(APIError::MonitorUpdateFailed);
-				}
+			} {
+				Some((update_add, commitment_signed, chan_monitor)) => {
+					if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+						maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst);
+						// Note that MonitorUpdateFailed here indicates (per function docs)
+						// that we will resend the commitment update once we unfreeze monitor
+						// updating, so we have to take special care that we don't return
+						// something else in case we will resend later! (See the caller-side
+						// sketch just below.)
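To make the retry semantics spelled out in the new docs concrete, here is a minimal caller-side sketch. This is an editorial illustration, not part of this patch; send_with_retry_policy is a hypothetical wrapper, while ChannelManager, Route, and APIError are as defined in this crate:

    // Hypothetical caller of ChannelManager::send_payment, branching on the
    // error classes documented above.
    fn send_with_retry_policy(chan_mgr: &ChannelManager, route: Route, payment_hash: [u8; 32]) {
        match chan_mgr.send_payment(route, payment_hash) {
            Ok(()) => {
                // HTLC committed and sent; await PaymentSent/PaymentFailed events.
            },
            Err(APIError::RouteError { err }) | Err(APIError::ChannelUnavailable { err }) => {
                // Nothing was committed: retrying over a different route is safe.
                let _ = err;
            },
            Err(APIError::MonitorUpdateFailed) => {
                // Irrevocably committed; the update goes out once the monitor
                // update succeeds. Retrying elsewhere risks paying twice.
            },
            Err(_) => {},
        }
    }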
+ return Err(APIError::MonitorUpdateFailed); + } - channel_lock.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: route.hops.first().unwrap().pubkey, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - }, - }); - }, - None => {}, - } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: route.hops.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); + }, + None => {}, + } + } else { unreachable!(); } return Ok(()); }; @@ -1481,7 +1473,7 @@ impl ChannelManager { }, }; if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!();// but def dont push the event... + unimplemented!(); } channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: forward_chan.get_their_node_id(), @@ -1719,6 +1711,13 @@ impl ChannelManager { if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { + // TODO: There may be some pending HTLCs that we intended to fail + // backwards when a monitor update failed. We should make sure + // knowledge of those gets moved into the appropriate in-memory + // ChannelMonitor and they get failed backwards once we get + // on-chain confirmations. + // Note I think #198 addresses this, so once its merged a test + // should be written. 
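Since much of this patch branches on ChannelMonitorUpdateErr, it may help to see how a user-provided ManyChannelMonitor could choose between the two variants. The following is a sketch under stated assumptions, not code from this patch: DiskPersister and its write_monitor_to_disk helper are hypothetical, and the error mapping shown is only one plausible policy.

    // Hypothetical persister: maps storage outcomes onto the two
    // ChannelMonitorUpdateErr variants the new macros branch on.
    impl DiskPersister {
        fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor)
                -> Result<(), ChannelMonitorUpdateErr> {
            match self.write_monitor_to_disk(&funding_txo, &monitor) { // assumed helper
                Ok(()) => Ok(()),
                // Transient problem (e.g. a remote backup briefly unreachable):
                // the channel is frozen and the update replayed on restoration.
                Err(ref e) if e.kind() == std::io::ErrorKind::TimedOut =>
                    Err(ChannelMonitorUpdateErr::TemporaryFailure),
                // Anything else: unsafe to continue; the channel gets force-closed.
                Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
            }
        }
    }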
if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } @@ -2301,7 +2300,7 @@ impl ChannelManager { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if (msg.failure_code & 0x8000) == 0 { - return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id)); + try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan); } try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); Ok(()) @@ -2321,8 +2320,9 @@ impl ChannelManager { } let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan); - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some()); + //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: their_node_id.clone(), @@ -2398,8 +2398,8 @@ impl ChannelManager { } let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan); - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures); } if let Some(updates) = commitment_update { channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { @@ -2461,9 +2461,10 @@ impl ChannelManager { let were_node_one = announcement.node_id_1 == our_node_id; let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); - let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id); - secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action); - secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action); + if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() || + self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() { + try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan); + } let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key); @@ -2492,11 +2493,21 @@ impl ChannelManager { if chan.get().get_their_node_id() != *their_node_id { return 
Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) = try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan); if let Some(monitor) = channel_monitor { - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); + if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + // channel_reestablish doesn't guarantee the order it returns is sensical + // for the messages it returns, but if we're setting what messages to + // re-transmit on monitor update success, we need to make sure it is sane. + if revoke_and_ack.is_none() { + order = RAACommitmentOrder::CommitmentFirst; + } + if commitment_update.is_none() { + order = RAACommitmentOrder::RevokeAndACKFirst; + } + return_monitor_err!(self, e, channel_state, chan, order); + //TODO: Resend the funding_locked if needed once we get the monitor running again } } if let Some(msg) = funding_locked { @@ -2700,6 +2711,13 @@ impl ChainListener for ChannelManager { for failure in failed_channels.drain(..) { self.finish_force_close_channel(failure); } + { + for htlc_update in self.monitor.fetch_pending_htlc_updated() { + if let Some(preimage) = htlc_update.payment_preimage { + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + } + } + } self.latest_block_height.store(height as usize, Ordering::Release); *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash(); } @@ -3868,6 +3886,12 @@ mod tests { _ => panic!("Unexpected event type!"), } } + + fn from_node(node: &Node) -> SendEvent { + let mut events = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.pop().unwrap()) + } } macro_rules! 
check_added_monitors { @@ -3890,7 +3914,7 @@ mod tests { commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false); } }; - ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => { { let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_b, 0); @@ -3915,6 +3939,23 @@ mod tests { assert!($node_a.node.get_and_clear_pending_events().is_empty()); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); } + (extra_msg_option, bs_revoke_and_ack) + } + }; + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => { + { + check_added_monitors!($node_a, 0); + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + check_added_monitors!($node_a, 1); + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); + assert!(extra_msg_option.is_none()); + bs_revoke_and_ack + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + { + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); { let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); @@ -4960,52 +5001,6 @@ mod tests { assert!(nodes[2].node.list_channels().is_empty()); } - #[test] - fn update_fee_async_shutdown() { - // Test update_fee works after shutdown start if messages are delivered out-of-order - let nodes = create_network(2); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate(); - nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fulfill_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_some()); - - nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); - // Note that we don't actually test normative behavior here. The spec indicates we could - // actually send a closing_signed here, but is kinda unclear and could possibly be amended - // to require waiting on the full commitment dance before doing so (see - // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid - // ambiguity, we should wait until after the full commitment dance to send closing_signed. 
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); - let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true); - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - msg - }, - _ => panic!("Unexpected event"), - }).unwrap(); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - } - fn do_test_shutdown_rebroadcast(recv_count: u8) { // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of // messages delivered prior to disconnect @@ -5349,7 +5344,10 @@ mod tests { false } else { true } }); - assert_eq!(res.len(), 2); + assert!(res.len() == 2 || res.len() == 3); + if res.len() == 3 { + assert_eq!(res[1], res[2]); + } } assert!(node_txn.is_empty()); @@ -6354,6 +6352,31 @@ mod tests { node_b.node.peer_connected(&node_a.node.get_our_node_id()); let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); + if send_funding_locked.0 { + // If a expects a funding_locked, it better not think it has received a revoke_and_ack + // from b + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.1 { + // If b expects a funding_locked, it better not think it has received a revoke_and_ack + // from a + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.0 || send_funding_locked.1 { + // If we expect any funding_locked's, both sides better have set + // next_local_commitment_number to 1 + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + } + let mut resp_1 = Vec::new(); for msg in reestablish_1 { node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap(); @@ -6960,15 +6983,19 @@ mod tests { let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } check_added_monitors!(nodes[0], 1); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); + assert_eq!(events_1.len(), 2); match events_1[0] { MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, _ => panic!("Unexpected event"), }; + match events_1[1] { + MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()), + _ => panic!("Unexpected event"), + }; // TODO: Once we hit the chain with the failure transaction we should check that we get a // PaymentFailed event @@ -7407,6 +7434,436 @@ mod tests { do_test_monitor_temporary_update_fail(3 | 8 | 16); } + #[test] + fn test_monitor_update_fail_cs() { + // Tests handling of a monitor update failure when processing an incoming commitment_signed + let mut nodes = create_network(2); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]); + nodes[0].node.send_payment(route, our_payment_hash).unwrap(); + check_added_monitors!(nodes[0], 1); + + let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); + let responses = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(responses.len(), 2); + + match responses[0] { + MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap(); + check_added_monitors!(nodes[0], 1); + }, + _ => panic!("Unexpected event"), + } + match responses[1] { + MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => { + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + }, + _ => panic!("Unexpected event"), + } + + *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[0].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[0], 1); + + let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap(); + check_added_monitors!(nodes[1], 
1); + + let mut events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + + events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { payment_hash, amt } => { + assert_eq!(payment_hash, our_payment_hash); + assert_eq!(amt, 1000000); + }, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + } + + fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { + // Tests handling of a monitor update failure when processing an incoming RAA + let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); + + // Rebalance a bit so that we can send backwards from 2 to 1. + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); + + // Route a first payment that we'll fail backwards + let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + + // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA + assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, PaymentFailReason::PreimageUnknown)); + check_added_monitors!(nodes[2], 1); + + let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + + let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + check_added_monitors!(nodes[0], 0); + + // While the second channel is AwaitingRAA, forward a second payment to get it into the + // holding cell. + let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(route, payment_hash_2).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); + + let events_1 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Now fail monitor updating. 
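For readers following these tests: monitor failures are injected through the test harness, whose ChannelMonitor wrapper returns whatever Result is stored in its update_ret field from add_update_monitor. A sketch of that hook, with the shape inferred from how the tests use it rather than copied from test_utils:

    // Sketch of the test monitor hook driven by the tests here: the wrapper
    // records the update (for the check_added_monitors! assertions) and then
    // returns the injected result.
    pub struct TestChannelMonitor {
        pub update_ret: Mutex<Result<(), ChannelMonitorUpdateErr>>,
        // ... also forwards updates to a real SimpleManyChannelMonitor ...
    }
    impl TestChannelMonitor {
        pub fn add_update_monitor(&self, _funding_txo: OutPoint, _monitor: ChannelMonitor)
                -> Result<(), ChannelMonitorUpdateErr> {
            // record the update, then inject the configured result
            self.update_ret.lock().unwrap().clone()
        }
    }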
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[1], 1); + + // Attempt to forward a third payment but fail due to the second channel being unavailable + // for forwarding. + + let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[0].node.send_payment(route, payment_hash_3).unwrap(); + check_added_monitors!(nodes[0], 1); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel + send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); + check_added_monitors!(nodes[1], 0); + + let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_2.len(), 1); + match events_2.remove(0) { + MessageSendEvent::UpdateHTLCs { node_id, updates } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events[0] { + assert_eq!(payment_hash, payment_hash_3); + assert!(!rejected_by_dest); + } else { panic!("Unexpected event!"); } + }, + _ => panic!("Unexpected event type!"), + }; + + let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { + // Try to route another payment backwards from 2 to make sure 1 holds off on responding + let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap(); + nodes[2].node.send_payment(route, payment_hash_4).unwrap(); + check_added_monitors!(nodes[2], 1); + + send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) { + assert_eq!(err, "Previous monitor update failure prevented generation of RAA"); + } else { panic!(); } + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + 
assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + (Some(payment_preimage_4), Some(payment_hash_4)) + } else { (None, None) }; + + // Restore monitor updating, ensuring we immediately get a fail-back update and a + // update_add update. + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 2); + + let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); + if test_ignore_second_cs { + assert_eq!(events_3.len(), 3); + } else { + assert_eq!(events_3.len(), 2); + } + + // Note that the ordering of the events for different nodes is non-prescriptive, though the + // ordering of the two events that both go to nodes[2] have to stay in the same order. + let messages_a = match events_3.pop().unwrap() { + MessageSendEvent::UpdateHTLCs { node_id, mut updates } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + (updates.update_fail_htlcs.remove(0), updates.commitment_signed) + }, + _ => panic!("Unexpected event type!"), + }; + let raa = if test_ignore_second_cs { + match events_3.remove(1) { + MessageSendEvent::SendRevokeAndACK { node_id, msg } => { + assert_eq!(node_id, nodes[2].node.get_our_node_id()); + Some(msg.clone()) + }, + _ => panic!("Unexpected event"), + } + } else { None }; + let send_event_b = SendEvent::from_event(events_3.remove(0)); + assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id()); + + // Now deliver the new messages... + + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false); + let events_4 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_4.len(), 1); + if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events_4[0] { + assert_eq!(payment_hash, payment_hash_1); + assert!(rejected_by_dest); + } else { panic!("Unexpected event!"); } + + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap(); + if test_ignore_second_cs { + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(bs_cs.update_add_htlcs.is_empty()); + assert!(bs_cs.update_fail_htlcs.is_empty()); + assert!(bs_cs.update_fail_malformed_htlcs.is_empty()); + assert!(bs_cs.update_fulfill_htlcs.is_empty()); + assert!(bs_cs.update_fee.is_none()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[1], 1); + let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + assert!(as_cs.update_add_htlcs.is_empty()); + assert!(as_cs.update_fail_htlcs.is_empty()); + assert!(as_cs.update_fail_malformed_htlcs.is_empty()); + assert!(as_cs.update_fulfill_htlcs.is_empty()); + assert!(as_cs.update_fee.is_none()); + + 
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap(); + check_added_monitors!(nodes[1], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + + nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap(); + check_added_monitors!(nodes[2], 1); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap(); + check_added_monitors!(nodes[2], 1); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } else { + commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false); + } + + let events_5 = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events_5.len(), 1); + match events_5[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[2].node.process_pending_htlc_forwards(); + + let events_6 = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events_6.len(), 1); + match events_6[0] { + Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, + _ => panic!("Unexpected event"), + }; + + if test_ignore_second_cs { + let events_7 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_7.len(), 1); + match events_7[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors!(nodes[1], 1); + + send_event = SendEvent::from_node(&nodes[1]); + assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(send_event.msgs.len(), 1); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false); + + let events_8 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_8.len(), 1); + match events_8[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now(); + nodes[0].node.process_pending_htlc_forwards(); + + let events_9 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_9.len(), 1); + match events_9[0] { + Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()), + _ => panic!("Unexpected event"), + }; + claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap()); + } + + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); + } + + #[test] + fn test_monitor_update_fail_raa() { + do_test_monitor_update_fail_raa(false); + do_test_monitor_update_fail_raa(true); + } + + #[test] + fn test_monitor_update_fail_reestablish() { + // Simple test for message retransmission after monitor update failure on + // channel_reestablish generating a monitor update (which comes from freeing holding cell + // HTLCs). 
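Before the test body, note the reconnect pattern it drives; this is the same handshake the reconnect helper earlier in this file performs, shown here in outline using the exact calls that appear below:

    // Outline of the disconnect/reconnect pattern exercised by this test:
    nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
    nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
    nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
    nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
    // Each side then emits a channel_reestablish which the other must handle;
    // handling can itself produce a monitor update (freeing holding cell
    // HTLCs), and that update is what this test makes fail.
    let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
    nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();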
+ let mut nodes = create_network(3); + create_announced_chan_between_nodes(&nodes, 0, 1); + create_announced_chan_between_nodes(&nodes, 1, 2); + + let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + + assert!(nodes[2].node.claim_funds(our_payment_preimage)); + check_added_monitors!(nodes[2], 1); + let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + + let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); + + if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() { + assert_eq!(err, "Failed to update ChannelMonitor"); + } else { panic!(); } + check_added_monitors!(nodes[1], 1); + + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id()); + + assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id())); + assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id())); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap(); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap(); + check_added_monitors!(nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + nodes[1].node.test_restore_channel_monitor(); + check_added_monitors!(nodes[1], 1); + + updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[0], 
nodes[1], updates.commitment_signed, false); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage), + _ => panic!("Unexpected event"), + } + } + #[test] fn test_invalid_channel_announcement() { //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build customed channel_announcement msgs @@ -7680,8 +8137,8 @@ mod tests { } else { panic!("Unexpected result"); } } - macro_rules! check_dynamic_output_p2wsh { - ($node: expr) => { + macro_rules! check_spendable_outputs { + ($node: expr, $der_idx: expr) => { { let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events(); let mut txn = Vec::new(); @@ -7690,6 +8147,33 @@ mod tests { Event::SpendableOutputs { ref outputs } => { for outp in outputs { match *outp { + SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => { + let input = TxIn { + previous_output: outpoint.clone(), + script_sig: Script::new(), + sequence: 0, + witness: Vec::new(), + }; + let outp = TxOut { + script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(), + value: output.value, + }; + let mut spend_tx = Transaction { + version: 2, + lock_time: 0, + input: vec![input], + output: vec![outp], + }; + let secp_ctx = Secp256k1::new(); + let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key); + let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey(); + let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap(); + let remotesig = secp_ctx.sign(&sighash, key); + spend_tx.input[0].witness.push(remotesig.serialize_der(&secp_ctx).to_vec()); + spend_tx.input[0].witness[0].push(SigHashType::All as u8); + spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec()); + txn.push(spend_tx); + }, SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => { let input = TxIn { previous_output: outpoint.clone(), @@ -7716,29 +8200,8 @@ mod tests { spend_tx.input[0].witness.push(witness_script.clone().into_bytes()); txn.push(spend_tx); }, - _ => panic!("Unexpected event"), - } - } - }, - _ => panic!("Unexpected event"), - }; - } - txn - } - } - } - - macro_rules! 
check_dynamic_output_p2wpkh { - ($node: expr) => { - { - let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events(); - let mut txn = Vec::new(); - for event in events { - match event { - Event::SpendableOutputs { ref outputs } => { - for outp in outputs { - match *outp { - SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => { + SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { + let secp_ctx = Secp256k1::new(); let input = TxIn { previous_output: outpoint.clone(), script_sig: Script::new(), @@ -7753,19 +8216,28 @@ mod tests { version: 2, lock_time: 0, input: vec![input], - output: vec![outp], + output: vec![outp.clone()], }; - let secp_ctx = Secp256k1::new(); - let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key); - let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey(); + let secret = { + match ExtendedPrivKey::new_master(&secp_ctx, Network::Testnet, &$node.node_seed) { + Ok(master_key) => { + match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) { + Ok(key) => key, + Err(_) => panic!("Your RNG is busted"), + } + } + Err(_) => panic!("Your rng is busted"), + } + }; + let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key; + let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey(); let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap(); - let remotesig = secp_ctx.sign(&sighash, key); - spend_tx.input[0].witness.push(remotesig.serialize_der(&secp_ctx).to_vec()); + let sig = secp_ctx.sign(&sighash, &secret.secret_key); + spend_tx.input[0].witness.push(sig.serialize_der(&secp_ctx).to_vec()); spend_tx.input[0].witness[0].push(SigHashType::All as u8); - spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec()); + spend_tx.input[0].witness.push(pubkey.serialize().to_vec()); txn.push(spend_tx); }, - _ => panic!("Unexpected event"), } } }, @@ -7777,57 +8249,6 @@ mod tests { } } - macro_rules! 
check_static_output { - ($event: expr, $node: expr, $event_idx: expr, $output_idx: expr, $der_idx: expr, $idx_node: expr) => { - match $event[$event_idx] { - Event::SpendableOutputs { ref outputs } => { - match outputs[$output_idx] { - SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { - let secp_ctx = Secp256k1::new(); - let input = TxIn { - previous_output: outpoint.clone(), - script_sig: Script::new(), - sequence: 0, - witness: Vec::new(), - }; - let outp = TxOut { - script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(), - value: output.value, - }; - let mut spend_tx = Transaction { - version: 2, - lock_time: 0, - input: vec![input], - output: vec![outp.clone()], - }; - let secret = { - match ExtendedPrivKey::new_master(&secp_ctx, Network::Testnet, &$node[$idx_node].node_seed) { - Ok(master_key) => { - match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) { - Ok(key) => key, - Err(_) => panic!("Your RNG is busted"), - } - } - Err(_) => panic!("Your rng is busted"), - } - }; - let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key; - let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey(); - let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap(); - let sig = secp_ctx.sign(&sighash, &secret.secret_key); - spend_tx.input[0].witness.push(sig.serialize_der(&secp_ctx).to_vec()); - spend_tx.input[0].witness[0].push(SigHashType::All as u8); - spend_tx.input[0].witness.push(pubkey.serialize().to_vec()); - spend_tx - }, - _ => panic!("Unexpected event !"), - } - }, - _ => panic!("Unexpected event !"), - }; - } - } - #[test] fn test_claim_sizeable_push_msat() { // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx @@ -7847,14 +8268,14 @@ mod tests { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0); - let spend_txn = check_dynamic_output_p2wsh!(nodes[1]); + let spend_txn = check_spendable_outputs!(nodes[1], 1); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0].clone()); } #[test] fn test_claim_on_remote_sizeable_push_msat() { - // Same test as precedent, just test on remote commitment tx, as per_commitment_point registration changes following you're funder/fundee and + // Same test as previous, just test on remote commitment tx, as per_commitment_point registration changes following you're funder/fundee and // to_remote output is encumbered by a P2WPKH let nodes = create_network(2); @@ -7878,12 +8299,42 @@ mod tests { MessageSendEvent::BroadcastChannelUpdate { .. 
 			} => {},
 			_ => panic!("Unexpected event"),
 		}
-		let spend_txn = check_dynamic_output_p2wpkh!(nodes[1]);
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
 		assert_eq!(spend_txn.len(), 2);
 		assert_eq!(spend_txn[0], spend_txn[1]);
 		check_spends!(spend_txn[0], node_txn[0].clone());
 	}

+	#[test]
+	fn test_claim_on_remote_revoked_sizeable_push_msat() {
+		// Same test as previous, just test on a remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and the
+		// to_remote output is encumbered by a P2WPKH
+
+		let nodes = create_network(2);
+
+		let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
+		let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+		let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+		assert_eq!(revoked_local_txn[0].input.len(), 1);
+		assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
+
+		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 4);
+		assert_eq!(spend_txn[0], spend_txn[2]); // to_remote output on revoked remote commitment_tx
+		check_spends!(spend_txn[0], revoked_local_txn[0].clone());
+		assert_eq!(spend_txn[1], spend_txn[3]); // to_local output on local commitment tx
+		check_spends!(spend_txn[1], node_txn[0].clone());
+	}
+
 	#[test]
 	fn test_static_spendable_outputs_preimage_tx() {
 		let nodes = create_network(2);
@@ -7919,9 +8370,10 @@ mod tests {
 		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 133);
 		check_spends!(node_txn[1], chan_1.3.clone());

-		let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
-		let spend_tx = check_static_output!(events, nodes, 0, 0, 1, 1);
-		check_spends!(spend_tx, node_txn[0].clone());
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 2);
+		assert_eq!(spend_txn[0], spend_txn[1]);
+		check_spends!(spend_txn[0], node_txn[0].clone());
 	}

 	#[test]
@@ -7951,9 +8403,10 @@ mod tests {
 		assert_eq!(node_txn[0].input.len(), 2);
 		check_spends!(node_txn[0], revoked_local_txn[0].clone());

-		let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
-		let spend_tx = check_static_output!(events, nodes, 0, 0, 1, 1);
-		check_spends!(spend_tx, node_txn[0].clone());
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 2);
+		assert_eq!(spend_txn[0], spend_txn[1]);
+		check_spends!(spend_txn[0], node_txn[0].clone());
 	}

 	#[test]
@@ -7979,10 +8432,12 @@ mod tests {
 			_ => panic!("Unexpected event"),
 		}
 		let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(revoked_htlc_txn.len(), 2);
+		assert_eq!(revoked_htlc_txn.len(), 3);
+		assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
 		assert_eq!(revoked_htlc_txn[0].input.len(), 1);
assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 133); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone()); + check_spends!(revoked_htlc_txn[1], chan_1.3.clone()); // B will generate justice tx from A's revoked commitment/HTLC tx nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1); @@ -7997,10 +8452,12 @@ mod tests { assert_eq!(node_txn[3].input.len(), 1); check_spends!(node_txn[3], revoked_htlc_txn[0].clone()); - let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events(); // Check B's ChannelMonitor was able to generate the right spendable output descriptor - let spend_tx = check_static_output!(events, nodes, 1, 1, 1, 1); - check_spends!(spend_tx, node_txn[3].clone()); + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 3); + assert_eq!(spend_txn[0], spend_txn[1]); + check_spends!(spend_txn[0], node_txn[0].clone()); + check_spends!(spend_txn[2], node_txn[3].clone()); } #[test] @@ -8027,7 +8484,8 @@ mod tests { } let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(revoked_htlc_txn.len(), 2); + assert_eq!(revoked_htlc_txn.len(), 3); + assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]); assert_eq!(revoked_htlc_txn[0].input.len(), 1); assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 138); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone()); @@ -8045,10 +8503,14 @@ mod tests { assert_eq!(node_txn[3].input.len(), 1); check_spends!(node_txn[3], revoked_htlc_txn[0].clone()); - let events = nodes[0].chan_monitor.simple_monitor.get_and_clear_pending_events(); // Check A's ChannelMonitor was able to generate the right spendable output descriptor - let spend_tx = check_static_output!(events, nodes, 1, 2, 1, 0); - check_spends!(spend_tx, node_txn[3].clone()); + let spend_txn = check_spendable_outputs!(nodes[0], 1); + assert_eq!(spend_txn.len(), 5); + assert_eq!(spend_txn[0], spend_txn[2]); + assert_eq!(spend_txn[1], spend_txn[3]); + check_spends!(spend_txn[0], revoked_local_txn[0].clone()); // spending to_remote output from revoked local tx + check_spends!(spend_txn[1], node_txn[2].clone()); // spending justice tx output from revoked local tx htlc received output + check_spends!(spend_txn[4], node_txn[3].clone()); // spending justice tx output on htlc success tx } #[test] @@ -8083,9 +8545,10 @@ mod tests { check_spends!(node_txn[0], local_txn[0].clone()); // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor - let spend_txn = check_dynamic_output_p2wsh!(nodes[1]); - assert_eq!(spend_txn.len(), 1); + let spend_txn = check_spendable_outputs!(nodes[1], 1); + assert_eq!(spend_txn.len(), 2); check_spends!(spend_txn[0], node_txn[0].clone()); + check_spends!(spend_txn[1], node_txn[2].clone()); } #[test] @@ -8114,10 +8577,14 @@ mod tests { check_spends!(node_txn[0], local_txn[0].clone()); // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor - let spend_txn = check_dynamic_output_p2wsh!(nodes[0]); - assert_eq!(spend_txn.len(), 4); + let spend_txn = check_spendable_outputs!(nodes[0], 1); + assert_eq!(spend_txn.len(), 8); assert_eq!(spend_txn[0], spend_txn[2]); + assert_eq!(spend_txn[0], spend_txn[4]); + assert_eq!(spend_txn[0], spend_txn[6]); assert_eq!(spend_txn[1], spend_txn[3]); + assert_eq!(spend_txn[1], 
spend_txn[5]); + assert_eq!(spend_txn[1], spend_txn[7]); check_spends!(spend_txn[0], local_txn[0].clone()); check_spends!(spend_txn[1], node_txn[0].clone()); } @@ -8133,13 +8600,13 @@ mod tests { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1); - let events = nodes[0].chan_monitor.simple_monitor.get_and_clear_pending_events(); - let spend_tx = check_static_output!(events, nodes, 0, 0, 2, 0); - check_spends!(spend_tx, closing_tx.clone()); + let spend_txn = check_spendable_outputs!(nodes[0], 2); + assert_eq!(spend_txn.len(), 1); + check_spends!(spend_txn[0], closing_tx.clone()); nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1); - let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events(); - let spend_tx = check_static_output!(events, nodes, 0, 0, 2, 1); - check_spends!(spend_tx, closing_tx); + let spend_txn = check_spendable_outputs!(nodes[1], 2); + assert_eq!(spend_txn.len(), 1); + check_spends!(spend_txn[0], closing_tx); } }
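As a closing note on the spendable-output changes above: the consolidated check_spendable_outputs! macro exercises all three SpendableOutputDescriptor variants, and a wallet consuming Event::SpendableOutputs would branch the same way. A skeleton follows (editorial sketch only; transaction building, signing, and fee logic are elided, and the comments restate what the macro's three arms do):

    // Editorial skeleton of wallet-side handling for Event::SpendableOutputs,
    // mirroring the three arms the macro above spends.
    fn handle_spendable_output(descriptor: &SpendableOutputDescriptor) {
        match *descriptor {
            // Paid to a key derived from the node seed; the macro re-derives it
            // via ExtendedPrivKey at the hardened BIP32 index passed as $der_idx,
            // then signs a plain P2WPKH-style spend.
            SpendableOutputDescriptor::StaticOutput { .. } => {},
            // Paid to the per-commitment key handed over in the descriptor;
            // spendable immediately, witness is [sig, pubkey].
            SpendableOutputDescriptor::DynamicOutputP2WPKH { .. } => {},
            // Paid to a script spendable with `key` only after `to_self_delay`
            // blocks (the macro sets the input sequence to to_self_delay);
            // `witness_script` goes last on the witness stack.
            SpendableOutputDescriptor::DynamicOutputP2WSH { .. } => {},
        }
    }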