diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 20a37798..e0d2b9aa 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -133,9 +133,16 @@ mod channel_held_info {
 }
 pub(super) use self::channel_held_info::*;
 
+type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>);
+
+/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
+/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
+/// immediately (ie with no further calls on it made). Thus, this step happens inside a
+/// channel_state lock. We then return the set of things that need to be done outside the lock in
+/// this struct and call handle_error!() on it.
 struct MsgHandleErrInternal {
 	err: msgs::HandleError,
-	needs_channel_force_close: bool,
+	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
 	#[inline]
@@ -150,11 +157,15 @@ impl MsgHandleErrInternal {
 				},
 			}),
 			},
-			needs_channel_force_close: false,
+			shutdown_finish: None,
 		}
 	}
 	#[inline]
-	fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
+	fn from_no_close(err: msgs::HandleError) -> Self {
+		Self { err, shutdown_finish: None }
+	}
+	#[inline]
+	fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
 		Self {
 			err: HandleError {
 				err,
@@ -165,18 +176,10 @@ impl MsgHandleErrInternal {
 				},
 			}),
 			},
-			needs_channel_force_close: true,
+			shutdown_finish: Some((shutdown_res, channel_update)),
 		}
 	}
 	#[inline]
-	fn from_maybe_close(err: msgs::HandleError) -> Self {
-		Self { err, needs_channel_force_close: true }
-	}
-	#[inline]
-	fn from_no_close(err: msgs::HandleError) -> Self {
-		Self { err, needs_channel_force_close: false }
-	}
-	#[inline]
 	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
 		Self {
 			err: match err {
@@ -194,28 +197,7 @@
 				}),
 				},
 			},
-			needs_channel_force_close: false,
-		}
-	}
-	#[inline]
-	fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
-		Self {
-			err: match err {
-				ChannelError::Ignore(msg) => HandleError {
-					err: msg,
-					action: Some(msgs::ErrorAction::IgnoreError),
-				},
-				ChannelError::Close(msg) => HandleError {
-					err: msg,
-					action: Some(msgs::ErrorAction::SendErrorMessage {
-						msg: msgs::ErrorMessage {
-							channel_id,
-							data: msg.to_string()
-						},
-					}),
-				},
-			},
-			needs_channel_force_close: true,
+			shutdown_finish: None,
 		}
 	}
 }
@@ -404,6 +386,117 @@ pub struct ChannelDetails {
 	pub user_id: u64,
 }
 
+macro_rules! handle_error {
+	($self: ident, $internal: expr, $their_node_id: expr) => {
+		match $internal {
+			Ok(msg) => Ok(msg),
+			Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+				if let Some((shutdown_res, update_option)) = shutdown_finish {
+					$self.finish_force_close_channel(shutdown_res);
+					if let Some(update) = update_option {
+						let mut channel_state = $self.channel_state.lock().unwrap();
+						channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+							msg: update
+						});
+					}
+				}
+				Err(err)
+			},
+		}
+	}
+}
+
+macro_rules!
break_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! try_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! return_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new()) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => { + if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); } + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + // TODO: $failed_fails is dropped here, which will cause other channels to hit the + // chain in a confused state! We need to move them into the ChannelMonitor which + // will be responsible for failing backwards once things confirm on-chain. + // It's ok that we drop $failed_forwards here - at this point we'd rather they + // broadcast HTLC-Timeout and pay the associated fees to get their funds back than + // us bother trying to claim it just to forward on to another peer. If we're + // splitting hairs we'd prefer to claim payments that were to us, but we haven't + // given up the preimage yet, so might as well just wait until the payment is + // retried, avoiding the on-chain fees. 
+					return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+				},
+				ChannelMonitorUpdateErr::TemporaryFailure => {
+					$entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs);
+					return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+				},
+			}
+		}
+	}
+}
+
+// Does not break in case of TemporaryFailure!
+macro_rules! maybe_break_monitor_err {
+	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
+		match $err {
+			ChannelMonitorUpdateErr::PermanentFailure => {
+				let (channel_id, mut chan) = $entry.remove_entry();
+				if let Some(short_id) = chan.get_short_channel_id() {
+					$channel_state.short_to_id.remove(&short_id);
+				}
+				break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+			},
+			ChannelMonitorUpdateErr::TemporaryFailure => {
+				$entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false);
+			},
+		}
+	}
+}
+
 impl ChannelManager {
 	/// Constructs a new ChannelManager to hold several channels and route between them.
 	///
@@ -577,7 +670,7 @@ impl ChannelManager {
 	}
 
 	#[inline]
-	fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>)) {
+	fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
 		let (local_txn, mut failed_htlcs) = shutdown_res;
 		for htlc_source in failed_htlcs.drain(..) {
 			// unknown_next_peer...I dunno who that is anymore....
@@ -629,33 +722,6 @@ impl ChannelManager {
 		}
 	}
 
-	fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
-		match err {
-			ChannelMonitorUpdateErr::PermanentFailure => {
-				let mut chan = {
-					let channel_state = channel_state_lock.borrow_parts();
-					let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-					if let Some(short_id) = chan.get_short_channel_id() {
-						channel_state.short_to_id.remove(&short_id);
-					}
-					chan
-				};
-				mem::drop(channel_state_lock);
-				self.finish_force_close_channel(chan.force_shutdown());
-				if let Ok(update) = self.get_channel_update(&chan) {
-					let mut channel_state = self.channel_state.lock().unwrap();
-					channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-						msg: update
-					});
-				}
-			},
-			ChannelMonitorUpdateErr::TemporaryFailure => {
-				let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-				channel.monitor_update_failed(reason);
-			},
-		}
-	}
-
 	#[inline]
 	fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
 		assert_eq!(shared_secret.len(), 32);
@@ -1155,7 +1221,17 @@ impl ChannelManager {
 	/// May generate a SendHTLCs message event on success, which should be relayed.
 	///
 	/// Raises APIError::RouteError when invalid route or forward parameter
-	/// (cltv_delta, fee, node public key) is specified
+	/// (cltv_delta, fee, node public key) is specified.
+	/// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
+	/// (including due to previous monitor update failure or new permanent monitor update failure).
+	/// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+	/// relevant updates.
+	///
+	/// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
+	/// and you may wish to retry via a different route immediately.
+	/// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
+	/// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
+	/// the payment via a different route unless you intend to pay twice!
 	pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
 		if route.hops.len() < 1 || route.hops.len() > 20 {
 			return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
@@ -1167,11 +1243,7 @@ impl ChannelManager {
 			}
 		}
 
-		let session_priv = SecretKey::from_slice(&self.secp_ctx, &{
-			let mut session_key = [0; 32];
-			rng::fill_bytes(&mut session_key);
-			session_key
-		}).expect("RNG is bad!");
+		let session_priv = self.keys_manager.get_session_key();
 
 		let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 
@@ -1181,53 +1253,73 @@ impl ChannelManager {
 		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 
 		let _ = self.total_consistency_lock.read().unwrap();
-		let mut channel_state = self.channel_state.lock().unwrap();
 
-		let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
-			None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
-			Some(id) => id.clone(),
-		};
+		let err: Result<(), _> = loop {
+			let mut channel_lock = self.channel_state.lock().unwrap();
 
-		let res = {
-			let chan = channel_state.by_id.get_mut(&id).unwrap();
-			if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
-				return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
-			}
-			if chan.is_awaiting_monitor_update() {
-				return Err(APIError::MonitorUpdateFailed);
-			}
-			if !chan.is_live() {
-				return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
-			}
-			chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-				route: route.clone(),
-				session_priv: session_priv.clone(),
-				first_hop_htlc_msat: htlc_msat,
-			}, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
- }; - match res { - Some((update_add, commitment_signed, chan_monitor)) => { - if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst); - return Err(APIError::MonitorUpdateFailed); - } + let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { + None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), + Some(id) => id.clone(), + }; + + let channel_state = channel_lock.borrow_parts(); + if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { + match { + if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey { + return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); + } + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}); + } + break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + route: route.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + }, onion_packet), channel_state, chan) + } { + Some((update_add, commitment_signed, chan_monitor)) => { + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst); + // Note that MonitorUpdateFailed here indicates (per function docs) + // that we will resent the commitment update once we unfree monitor + // updating, so we have to take special care that we don't return + // something else in case we will resend later! + return Err(APIError::MonitorUpdateFailed); + } - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: route.hops.first().unwrap().pubkey, - updates: msgs::CommitmentUpdate { - update_add_htlcs: vec![update_add], - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: route.hops.first().unwrap().pubkey, + updates: msgs::CommitmentUpdate { + update_add_htlcs: vec![update_add], + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + }, + }); }, - }); + None => {}, + } + } else { unreachable!(); } + return Ok(()); + }; + + match handle_error!(self, err, route.hops.first().unwrap().pubkey) { + Ok(_) => unreachable!(), + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + log_error!(self, "Got bad keys: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: route.hops.first().unwrap().pubkey, + action: e.action, + }); + } + Err(APIError::ChannelUnavailable { err: e.err }) }, - None => {}, } - - Ok(()) } /// Call this upon creation of a funding transaction for the given channel. 
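
The rewritten send_payment above also changes the caller-facing contract, as its new documentation spells out: RouteError and ChannelUnavailable are pre-commitment failures, so the payment may be retried over another route immediately, while MonitorUpdateFailed means the commitment update is already irrevocably queued locally and will be re-sent once monitor updating is restored. A minimal caller-side sketch of that contract follows; it assumes ChannelManager, Route, and APIError are in scope, and the function name and alternate_route fallback are hypothetical:

	// Sketch only: the branches mirror the error contract documented on send_payment.
	fn pay_with_fallback(manager: &ChannelManager, route: Route, alternate_route: Route,
			payment_hash: [u8; 32]) -> Result<(), APIError> {
		match manager.send_payment(route, payment_hash) {
			Ok(()) => Ok(()),
			// The HTLC was never committed, so retrying elsewhere cannot double-pay.
			Err(APIError::RouteError { .. }) | Err(APIError::ChannelUnavailable { .. }) =>
				manager.send_payment(alternate_route, payment_hash),
			// The commitment update is locked in and will be re-sent on monitor
			// restoration; retrying via a different route risks paying twice.
			Err(APIError::MonitorUpdateFailed) => Ok(()),
			Err(e) => Err(e),
		}
	}
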
@@ -1243,24 +1335,32 @@ impl ChannelManager { let _ = self.total_consistency_lock.read().unwrap(); let (chan, msg, chan_monitor) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.remove(temporary_channel_id) { - Some(mut chan) => { - match chan.get_outbound_funding_created(funding_txo) { - Ok(funding_msg) => { - (chan, funding_msg.0, funding_msg.1) - }, - Err(e) => { - log_error!(self, "Got bad signatures: {}!", e.err); - channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: chan.get_their_node_id(), - action: e.action, - }); - return; - }, - } + let (res, chan) = { + let mut channel_state = self.channel_state.lock().unwrap(); + match channel_state.by_id.remove(temporary_channel_id) { + Some(mut chan) => { + (chan.get_outbound_funding_created(funding_txo) + .map_err(|e| if let ChannelError::Close(msg) = e { + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None) + } else { unreachable!(); }) + , chan) + }, + None => return + } + }; + match handle_error!(self, res, chan.get_their_node_id()) { + Ok(funding_msg) => { + (chan, funding_msg.0, funding_msg.1) + }, + Err(e) => { + log_error!(self, "Got bad signatures: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: chan.get_their_node_id(), + action: e.action, + }); + return; }, - None => return } }; // Because we have exclusive ownership of the channel here we can release the channel_state @@ -1372,9 +1472,7 @@ impl ChannelManager { let (commitment_msg, monitor) = match forward_chan.send_commitment() { Ok(res) => res, Err(e) => { - if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action { - } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action { - } else { + if let ChannelError::Ignore(_) = e { panic!("Stated return value requirements in send_commitment() were not met"); } //TODO: Handle...this is bad! @@ -1382,7 +1480,7 @@ impl ChannelManager { }, }; if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!();// but def dont push the event... + unimplemented!(); } channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: forward_chan.get_their_node_id(), @@ -1620,6 +1718,13 @@ impl ChannelManager { if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { + // TODO: There may be some pending HTLCs that we intended to fail + // backwards when a monitor update failed. We should make sure + // knowledge of those gets moved into the appropriate in-memory + // ChannelMonitor and they get failed backwards once we get + // on-chain confirmations. + // Note I think #198 addresses this, so once its merged a test + // should be written. 
if let Some(short_id) = channel.get_short_channel_id() { short_to_id.remove(&short_id); } @@ -1706,19 +1811,19 @@ impl ChannelManager { fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> { let (value, output_script, user_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.temporary_channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - chan.accept_channel(&msg, &self.default_configuration) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?; - (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id()) + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan); + (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, //TODO: same as above - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -1732,22 +1837,16 @@ impl ChannelManager { } fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { - let (chan, funding_msg, monitor_update) = { - let mut channel_state = self.channel_state.lock().unwrap(); + let ((funding_msg, monitor_update), chan) = { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id)); } - match chan.get_mut().funding_created(msg) { - Ok((funding_msg, monitor_update)) => { - (chan.remove(), funding_msg, monitor_update) - }, - Err(e) => { - return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) - } - } + (try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id)) } @@ -1776,20 +1875,21 @@ impl ChannelManager { fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let (funding_txo, user_id) = { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = 
self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan); if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { unimplemented!(); } - (chan.get_funding_txo().unwrap(), chan.get_user_id()) + (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id()) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -1803,15 +1903,14 @@ impl ChannelManager { fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.funding_locked(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; - if let Some(announcement_sigs) = self.get_announcement_sigs(chan) { + try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan); + if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) { channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: their_node_id.clone(), msg: announcement_sigs, @@ -1819,7 +1918,7 @@ impl ChannelManager { } Ok(()) }, - None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } @@ -1834,7 +1933,7 @@ impl ChannelManager { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry); if let Some(msg) = shutdown { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: their_node_id.clone(), @@ -1882,7 +1981,7 @@ impl ChannelManager { //TODO: here and below 
MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (closing_signed, tx) = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; + let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry); if let Some(msg) = closing_signed { channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { node_id: their_node_id.clone(), @@ -1931,44 +2030,57 @@ impl ChannelManager { let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if !chan.is_usable() { + if !chan.get().is_usable() { // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info { + let chan_update = self.get_channel_update(chan.get()); pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, - reason: ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &self.get_channel_update(chan).unwrap().encode_with_len()[..]), + reason: if let Ok(update) = chan_update { + ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &update.encode_with_len()[..]) + } else { + // This can only happen if the channel isn't in the fully-funded + // state yet, implying our counterparty is trying to route payments + // over the channel back to themselves (cause no one else should + // know the short_id is a lightning channel yet). 
We should have no + // problem just calling this unknown_next_peer + ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[]) + }, })); } } - chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e)) + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } + Ok(()) } fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { - //TODO: here and below MsgHandleErrInternal, #153 case - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); - } - chan.update_fulfill_htlc(&msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone() - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + let mut channel_lock = self.channel_state.lock().unwrap(); + let htlc_source = { + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { + //TODO: here and below MsgHandleErrInternal, #153 case + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); + } + try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } }; - self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone()); + self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone()); Ok(()) } @@ -2170,52 +2282,54 @@ impl ChannelManager { } fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding 
channel", msg.channel_id)) - }?; + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + } Ok(()) } fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } if (msg.failure_code & 0x8000) == 0 { - return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id)); + try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan); } - chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); Ok(()) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = + try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some()); + //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: their_node_id.clone(), @@ -2242,7 
+2356,7 @@ impl ChannelManager { } Ok(()) }, - None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } @@ -2283,15 +2397,16 @@ impl ChannelManager { let (pending_forwards, mut pending_failures, short_channel_id) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?; - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = + try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan); + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures); } if let Some(updates) = commitment_update { channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { @@ -2305,9 +2420,9 @@ impl ChannelManager { msg, }); } - (pending_forwards, pending_failures, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel")) + (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")) }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } }; for failure in pending_failures.drain(..) 
{ @@ -2319,41 +2434,44 @@ impl ChannelManager { } fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { - let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + let mut channel_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_lock.borrow_parts(); + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id)) + try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } + Ok(()) } fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - if !chan.is_usable() { + if !chan.get().is_usable() { return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)})); } let our_node_id = self.get_our_node_id(); - let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let (announcement, our_bitcoin_sig) = + try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan); let were_node_one = announcement.node_id_1 == our_node_id; let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap(); - let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id); - secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action); - secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action); + if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() || + self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() { + 
try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan); + } let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key); @@ -2365,10 +2483,10 @@ impl ChannelManager { bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig }, contents: announcement, }, - update_msg: self.get_channel_update(chan).unwrap(), // can only fail if we're not in a ready state + update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state }); }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } Ok(()) } @@ -2377,16 +2495,26 @@ impl ChannelManager { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&msg.channel_id) { - Some(chan) => { - if chan.get_their_node_id() != *their_node_id { + match channel_state.by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_their_node_id() != *their_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id)); } - let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg) - .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?; + let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) = + try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan); if let Some(monitor) = channel_monitor { - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); + if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + // channel_reestablish doesn't guarantee the order it returns is sensical + // for the messages it returns, but if we're setting what messages to + // re-transmit on monitor update success, we need to make sure it is sane. 
+ if revoke_and_ack.is_none() { + order = RAACommitmentOrder::CommitmentFirst; + } + if commitment_update.is_none() { + order = RAACommitmentOrder::RevokeAndACKFirst; + } + return_monitor_err!(self, e, channel_state, chan, order); + //TODO: Resend the funding_locked if needed once we get the monitor running again } } if let Some(msg) = funding_locked { @@ -2429,7 +2557,7 @@ impl ChannelManager { } Ok(()) }, - None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) } } @@ -2440,40 +2568,62 @@ impl ChannelManager { #[doc(hidden)] pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> { let _ = self.total_consistency_lock.read().unwrap(); - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = channel_state_lock.borrow_parts(); + let their_node_id; + let err: Result<(), _> = loop { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = channel_state_lock.borrow_parts(); - match channel_state.by_id.get_mut(&channel_id) { - None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), - Some(chan) => { - if !chan.is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); - } - if chan.is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); - } - if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? 
{ - if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - unimplemented!(); + match channel_state.by_id.entry(channel_id) { + hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}), + hash_map::Entry::Occupied(mut chan) => { + if !chan.get().is_outbound() { + return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"}); } - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_their_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, + if chan.get().is_awaiting_monitor_update() { + return Err(APIError::MonitorUpdateFailed); + } + if !chan.get().is_live() { + return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"}); + } + their_node_id = chan.get().get_their_node_id(); + if let Some((update_fee, commitment_signed, chan_monitor)) = + break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan) + { + if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + unimplemented!(); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + } + }, + } + return Ok(()) + }; + + match handle_error!(self, err, their_node_id) { + Ok(_) => unreachable!(), + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + log_error!(self, "Got bad keys: {}!", e.err); + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: their_node_id, + action: e.action, }); } + Err(APIError::APIMisuseError { err: e.err }) }, } - Ok(()) } } @@ -2521,11 +2671,9 @@ impl ChainListener for ChannelManager { } else if let Err(e) = chan_res { pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_their_node_id(), - action: e.action, + action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }), }); - if channel.is_shutdown() { - return false; - } + return false; } if let Some(funding_txo) = channel.get_funding_txo() { for tx in txn_matched { @@ -2608,38 +2756,6 @@ impl ChainListener for ChannelManager { } } -macro_rules! 
handle_error { - ($self: ident, $internal: expr, $their_node_id: expr) => { - match $internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, needs_channel_force_close }) => { - if needs_channel_force_close { - match &err.action { - &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {}, - &Some(msgs::ErrorAction::IgnoreError) => {}, - &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &None => {}, - } - } - Err(err) - }, - } - } -} - impl ChannelMessageHandler for ChannelManager { //TODO: Handle errors and close channel (or so) fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> { @@ -3770,6 +3886,12 @@ mod tests { _ => panic!("Unexpected event type!"), } } + + fn from_node(node: &Node) -> SendEvent { + let mut events = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.pop().unwrap()) + } } macro_rules! check_added_monitors { @@ -3792,7 +3914,7 @@ mod tests { commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false); } }; - ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => { { let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id()); check_added_monitors!($node_b, 0); @@ -3817,6 +3939,23 @@ mod tests { assert!($node_a.node.get_and_clear_pending_events().is_empty()); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); } + (extra_msg_option, bs_revoke_and_ack) + } + }; + ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => { + { + check_added_monitors!($node_a, 0); + assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); + $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap(); + check_added_monitors!($node_a, 1); + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); + assert!(extra_msg_option.is_none()); + bs_revoke_and_ack + } + }; + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => { + { + let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true); $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); { let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap(); @@ -4446,7 +4585,7 @@ mod tests { #[test] fn test_update_fee_that_funder_cannot_afford() { - let mut nodes = create_network(2); + let nodes = create_network(2); let channel_value = 1888; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000); let channel_id = chan.2; @@ -4481,7 +4620,7 @@ mod tests { let update2_msg = 
get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()).unwrap(); //While producing the commitment_signed response after handling a received update_fee request the //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) @@ -4862,52 +5001,6 @@ mod tests { assert!(nodes[2].node.list_channels().is_empty()); } - #[test] - fn update_fee_async_shutdown() { - // Test update_fee works after shutdown start if messages are delivered out-of-order - let nodes = create_network(2); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate(); - nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fulfill_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_some()); - - nodes[1].node.close_channel(&chan_1.2).unwrap(); - let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap(); - // Note that we don't actually test normative behavior here. The spec indicates we could - // actually send a closing_signed here, but is kinda unclear and could possibly be amended - // to require waiting on the full commitment dance before doing so (see - // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid - // ambiguity, we should wait until after the full commitment dance to send closing_signed. 
- let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap(); - nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap(); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap(); - let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true); - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - msg - }, - _ => panic!("Unexpected event"), - }).unwrap(); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap(); - let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); - assert!(node_0_none.is_none()); - } - fn do_test_shutdown_rebroadcast(recv_count: u8) { // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of // messages delivered prior to disconnect @@ -5334,8 +5427,7 @@ mod tests { }} } - #[test] - fn channel_reserve_test() { + fn do_channel_reserve_test(test_recv: bool) { use util::rng; use std::sync::atomic::Ordering; use ln::msgs::HandleError; @@ -5492,9 +5584,23 @@ mod tests { onion_routing_packet: onion_packet, }; - let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); - match err { - HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + if test_recv { + let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap(); + match err { + HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"), + } + // If we send a garbage message, the channel should get closed, making the rest of this test case fail. 
+ assert_eq!(nodes[1].node.list_channels().len(), 1); + assert_eq!(nodes[1].node.list_channels().len(), 1); + let channel_close_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(channel_close_broadcast.len(), 1); + match channel_close_broadcast[0] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + }, + _ => panic!("Unexpected event"), + } + return; } } @@ -5602,6 +5708,12 @@ mod tests { assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22); } + #[test] + fn channel_reserve_test() { + do_channel_reserve_test(false); + do_channel_reserve_test(true); + } + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -6237,6 +6349,31 @@ mod tests { node_b.node.peer_connected(&node_a.node.get_our_node_id()); let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); + if send_funding_locked.0 { + // If a expects a funding_locked, it better not think it has received a revoke_and_ack + // from b + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.1 { + // If b expects a funding_locked, it better not think it has received a revoke_and_ack + // from a + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_remote_commitment_number, 0); + } + } + if send_funding_locked.0 || send_funding_locked.1 { + // If we expect any funding_locked's, both sides better have set + // next_local_commitment_number to 1 + for reestablish in reestablish_1.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + for reestablish in reestablish_2.iter() { + assert_eq!(reestablish.next_local_commitment_number, 1); + } + } + let mut resp_1 = Vec::new(); for msg in reestablish_1 { node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap(); @@ -6843,15 +6980,19 @@ mod tests { let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]); *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); - if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } + if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); } check_added_monitors!(nodes[0], 1); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); + assert_eq!(events_1.len(), 2); match events_1[0] { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, _ => panic!("Unexpected event"), }; + match events_1[1] { + MessageSendEvent::HandleError { node_id, .. 
 
 		// TODO: Once we hit the chain with the failure transaction we should check that we get a
 		// PaymentFailed event
@@ -7290,6 +7431,436 @@ mod tests {
 		do_test_monitor_temporary_update_fail(3 | 8 | 16);
 	}
 
+	#[test]
+	fn test_monitor_update_fail_cs() {
+		// Tests handling of a monitor update failure when processing an incoming commitment_signed
+		let mut nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+		nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+			assert_eq!(err, "Failed to update ChannelMonitor");
+		} else { panic!(); }
+		check_added_monitors!(nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[1].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[1], 1);
+		let responses = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(responses.len(), 2);
+
+		match responses[0] {
+			MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap();
+				check_added_monitors!(nodes[0], 1);
+			},
+			_ => panic!("Unexpected event"),
+		}
+		match responses[1] {
+			MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fulfill_htlcs.is_empty());
+				assert!(updates.update_fail_htlcs.is_empty());
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+
+				*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+				if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+					assert_eq!(err, "Failed to update ChannelMonitor");
+				} else { panic!(); }
+				check_added_monitors!(nodes[0], 1);
+				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+
+		let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
+		check_added_monitors!(nodes[1], 1);
+
+		let mut events = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
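+		// (Tests set next_forward directly so process_pending_htlc_forwards does not have to
+		// wait out the forwarding delay.)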
+		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[1].node.process_pending_htlc_forwards();
+
+		events = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentReceived { payment_hash, amt } => {
+				assert_eq!(payment_hash, our_payment_hash);
+				assert_eq!(amt, 1000000);
+			},
+			_ => panic!("Unexpected event"),
+		};
+
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+	}
+
+	fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
+		// Tests handling of a monitor update failure when processing an incoming RAA
+		let mut nodes = create_network(3);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		// Rebalance a bit so that we can send backwards from 2 to 1.
+		send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+		// Route a first payment that we'll fail backwards
+		let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+		// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
+		assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, PaymentFailReason::PreimageUnknown));
+		check_added_monitors!(nodes[2], 1);
+
+		let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+
+		let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
+		check_added_monitors!(nodes[0], 0);
+
+		// While the second channel is AwaitingRAA, forward a second payment to get it into the
+		// holding cell.
+		let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+		let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
+
+		let events_1 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_1.len(), 1);
+		match events_1[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+
+		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[1].node.process_pending_htlc_forwards();
+		check_added_monitors!(nodes[1], 0);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		// Now fail monitor updating.
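+		// (A TemporaryFailure is expected to be retried later; in this test that happens via
+		// the test-only test_restore_channel_monitor call further down.)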
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+			assert_eq!(err, "Failed to update ChannelMonitor");
+		} else { panic!(); }
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		check_added_monitors!(nodes[1], 1);
+
+		// Attempt to forward a third payment, which fails because the second channel is
+		// currently unavailable for forwarding.
+		let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+		let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+		send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+		check_added_monitors!(nodes[1], 0);
+
+		let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events_2.len(), 1);
+		match events_2.remove(0) {
+			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+				assert_eq!(node_id, nodes[0].node.get_our_node_id());
+				assert!(updates.update_fulfill_htlcs.is_empty());
+				assert_eq!(updates.update_fail_htlcs.len(), 1);
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+
+				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+				commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+				let events = nodes[0].node.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events[0] {
+					assert_eq!(payment_hash, payment_hash_3);
+					assert!(!rejected_by_dest);
+				} else { panic!("Unexpected event!"); }
+			},
+			_ => panic!("Unexpected event type!"),
+		};
+
+		let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
+			// Try to route another payment backwards from 2 to make sure 1 holds off on responding
+			let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
+			let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+			nodes[2].node.send_payment(route, payment_hash_4).unwrap();
+			check_added_monitors!(nodes[2], 1);
+
+			send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
+			nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+			if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+				assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+			} else { panic!(); }
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+			assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+			(Some(payment_preimage_4), Some(payment_hash_4))
+		} else { (None, None) };
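+		// The first payment's fail-back to nodes[0] and the second payment's forward to
+		// nodes[2] are now both queued up behind the failed monitor update.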
+
+		// Restore monitor updating, ensuring we immediately get a fail-back update and an
+		// update_add update.
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[1].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[1], 2);
+
+		let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
+		if test_ignore_second_cs {
+			assert_eq!(events_3.len(), 3);
+		} else {
+			assert_eq!(events_3.len(), 2);
+		}
+
+		// Note that the ordering of the events for different nodes is non-prescriptive, though the
+		// ordering of the two events that both go to nodes[2] has to stay in the same order.
+		let messages_a = match events_3.pop().unwrap() {
+			MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
+				assert_eq!(node_id, nodes[0].node.get_our_node_id());
+				assert!(updates.update_fulfill_htlcs.is_empty());
+				assert_eq!(updates.update_fail_htlcs.len(), 1);
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+				(updates.update_fail_htlcs.remove(0), updates.commitment_signed)
+			},
+			_ => panic!("Unexpected event type!"),
+		};
+		let raa = if test_ignore_second_cs {
+			match events_3.remove(1) {
+				MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
+					assert_eq!(node_id, nodes[2].node.get_our_node_id());
+					Some(msg.clone())
+				},
+				_ => panic!("Unexpected event"),
+			}
+		} else { None };
+		let send_event_b = SendEvent::from_event(events_3.remove(0));
+		assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
+
+		// Now deliver the new messages...
+
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap();
+		commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
+		let events_4 = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events_4.len(), 1);
+		if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events_4[0] {
+			assert_eq!(payment_hash, payment_hash_1);
+			assert!(rejected_by_dest);
+		} else { panic!("Unexpected event!"); }
+
+		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap();
+		if test_ignore_second_cs {
+			nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+			nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+			assert!(bs_cs.update_add_htlcs.is_empty());
+			assert!(bs_cs.update_fail_htlcs.is_empty());
+			assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
+			assert!(bs_cs.update_fulfill_htlcs.is_empty());
+			assert!(bs_cs.update_fee.is_none());
+
+			nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+			check_added_monitors!(nodes[1], 1);
+			let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+			assert!(as_cs.update_add_htlcs.is_empty());
+			assert!(as_cs.update_fail_htlcs.is_empty());
+			assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+			assert!(as_cs.update_fulfill_htlcs.is_empty());
+			assert!(as_cs.update_fee.is_none());
+
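+			// Manually walk nodes[1] and nodes[2] through the remainder of the
+			// commitment_signed/revoke_and_ack dance.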
+			nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+			check_added_monitors!(nodes[1], 1);
+			let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+			nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+			nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+			nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap();
+			check_added_monitors!(nodes[1], 1);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		} else {
+			commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+		}
+
+		let events_5 = nodes[2].node.get_and_clear_pending_events();
+		assert_eq!(events_5.len(), 1);
+		match events_5[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+
+		nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[2].node.process_pending_htlc_forwards();
+
+		let events_6 = nodes[2].node.get_and_clear_pending_events();
+		assert_eq!(events_6.len(), 1);
+		match events_6[0] {
+			Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
+			_ => panic!("Unexpected event"),
+		};
+
+		if test_ignore_second_cs {
+			let events_7 = nodes[1].node.get_and_clear_pending_events();
+			assert_eq!(events_7.len(), 1);
+			match events_7[0] {
+				Event::PendingHTLCsForwardable { .. } => { },
+				_ => panic!("Unexpected event"),
+			};
+
+			nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+			nodes[1].node.process_pending_htlc_forwards();
+			check_added_monitors!(nodes[1], 1);
+
+			send_event = SendEvent::from_node(&nodes[1]);
+			assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
+			assert_eq!(send_event.msgs.len(), 1);
+			nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+			commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
+
+			let events_8 = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events_8.len(), 1);
+			match events_8[0] {
+				Event::PendingHTLCsForwardable { .. } => { },
+				_ => panic!("Unexpected event"),
+			};
+
+			nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
+			nodes[0].node.process_pending_htlc_forwards();
+
+			let events_9 = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events_9.len(), 1);
+			match events_9[0] {
+				Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
+				_ => panic!("Unexpected event"),
+			};
+			claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
+		}
+
+		claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+	}
+
+	#[test]
+	fn test_monitor_update_fail_raa() {
+		do_test_monitor_update_fail_raa(false);
+		do_test_monitor_update_fail_raa(true);
+	}
+
+	#[test]
+	fn test_monitor_update_fail_reestablish() {
+		// Simple test for message retransmission after monitor update failure on
+		// channel_reestablish generating a monitor update (which comes from freeing holding cell
+		// HTLCs).
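+		// (The fulfill is claimed while nodes[0] and nodes[1] are disconnected, so the
+		// forwarded fulfill sits in the nodes[0]<->nodes[1] channel's holding cell until
+		// reestablish frees it.)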
+		let mut nodes = create_network(3);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+		assert!(nodes[2].node.claim_funds(our_payment_preimage));
+		check_added_monitors!(nodes[2], 1);
+		let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fail_htlcs.is_empty());
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+		nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+		check_added_monitors!(nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+		let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+		let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+			assert_eq!(err, "Failed to update ChannelMonitor");
+		} else { panic!(); }
+		check_added_monitors!(nodes[1], 1);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+		assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
+		assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();
+		check_added_monitors!(nodes[1], 0);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[1].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[1], 1);
+
+		updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fail_htlcs.is_empty());
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
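+		// With the monitor restored, the freed fulfill finally reaches nodes[0].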
+
+		let events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
+			_ => panic!("Unexpected event"),
+		}
+	}
+
 	#[test]
 	fn test_invalid_channel_announcement() {
 		// Test BOLT 7 channel_announcement msg requirement for final node, gather data to build custom channel_announcement msgs
@@ -7563,8 +8134,8 @@ mod tests {
 		} else { panic!("Unexpected result"); }
 	}
 
-	macro_rules! check_dynamic_output_p2wsh {
-		($node: expr) => {
+	macro_rules! check_spendable_outputs {
+		($node: expr, $der_idx: expr) => {
 			{
 				let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
 				let mut txn = Vec::new();
 				for event in events {
 					match event {
 						Event::SpendableOutputs { ref outputs } => {
 							for outp in outputs {
 								match *outp {
+									SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => {
+										let input = TxIn {
+											previous_output: outpoint.clone(),
+											script_sig: Script::new(),
+											sequence: 0,
+											witness: Vec::new(),
+										};
+										let outp = TxOut {
+											script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(),
+											value: output.value,
+										};
+										let mut spend_tx = Transaction {
+											version: 2,
+											lock_time: 0,
+											input: vec![input],
+											output: vec![outp],
+										};
+										let secp_ctx = Secp256k1::new();
+										let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
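+										// BIP143 signs a P2WPKH input against the matching
+										// P2PKH script, hence the Address::p2pkh below.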
+										let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey();
+										let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
+										let remotesig = secp_ctx.sign(&sighash, key);
+										spend_tx.input[0].witness.push(remotesig.serialize_der(&secp_ctx).to_vec());
+										spend_tx.input[0].witness[0].push(SigHashType::All as u8);
+										spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
+										txn.push(spend_tx);
+									},
 									SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => {
 										let input = TxIn {
 											previous_output: outpoint.clone(),
@@ -7599,29 +8197,8 @@ mod tests {
 										spend_tx.input[0].witness.push(witness_script.clone().into_bytes());
 										txn.push(spend_tx);
 									},
-									_ => panic!("Unexpected event"),
-								}
-							}
-						},
-						_ => panic!("Unexpected event"),
-					};
-				}
-				txn
-			}
-		}
-	}
-
-	macro_rules! check_dynamic_output_p2wpkh {
-		($node: expr) => {
-			{
-				let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
-				let mut txn = Vec::new();
-				for event in events {
-					match event {
-						Event::SpendableOutputs { ref outputs } => {
-							for outp in outputs {
-								match *outp {
-									SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => {
+									SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+										let secp_ctx = Secp256k1::new();
 										let input = TxIn {
 											previous_output: outpoint.clone(),
 											script_sig: Script::new(),
@@ -7636,19 +8213,28 @@ mod tests {
 											version: 2,
 											lock_time: 0,
 											input: vec![input],
-											output: vec![outp],
+											output: vec![outp.clone()],
 										};
-										let secp_ctx = Secp256k1::new();
-										let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
-										let witness_script = Address::p2pkh(&remotepubkey, Network::Testnet).script_pubkey();
+										let secret = {
+											match ExtendedPrivKey::new_master(&secp_ctx, Network::Testnet, &$node.node_seed) {
+												Ok(master_key) => {
+													match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) {
+														Ok(key) => key,
+														Err(_) => panic!("Your RNG is busted"),
+													}
+												}
+												Err(_) => panic!("Your RNG is busted"),
+											}
+										};
+										let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
+										let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
 										let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
-										let remotesig = secp_ctx.sign(&sighash, key);
-										spend_tx.input[0].witness.push(remotesig.serialize_der(&secp_ctx).to_vec());
+										let sig = secp_ctx.sign(&sighash, &secret.secret_key);
+										spend_tx.input[0].witness.push(sig.serialize_der(&secp_ctx).to_vec());
 										spend_tx.input[0].witness[0].push(SigHashType::All as u8);
-										spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
+										spend_tx.input[0].witness.push(pubkey.serialize().to_vec());
 										txn.push(spend_tx);
 									},
-									_ => panic!("Unexpected event"),
 								}
 							}
 						},
@@ -7660,57 +8246,6 @@ mod tests {
 		}
 	}
 
-	macro_rules! check_static_output {
-		($event: expr, $node: expr, $event_idx: expr, $output_idx: expr, $der_idx: expr, $idx_node: expr) => {
-			match $event[$event_idx] {
-				Event::SpendableOutputs { ref outputs } => {
-					match outputs[$output_idx] {
-						SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
-							let secp_ctx = Secp256k1::new();
-							let input = TxIn {
-								previous_output: outpoint.clone(),
-								script_sig: Script::new(),
-								sequence: 0,
-								witness: Vec::new(),
-							};
-							let outp = TxOut {
-								script_pubkey: Builder::new().push_opcode(opcodes::All::OP_RETURN).into_script(),
-								value: output.value,
-							};
-							let mut spend_tx = Transaction {
-								version: 2,
-								lock_time: 0,
-								input: vec![input],
-								output: vec![outp.clone()],
-							};
-							let secret = {
-								match ExtendedPrivKey::new_master(&secp_ctx, Network::Testnet, &$node[$idx_node].node_seed) {
-									Ok(master_key) => {
-										match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx)) {
-											Ok(key) => key,
-											Err(_) => panic!("Your RNG is busted"),
-										}
-									}
-									Err(_) => panic!("Your rng is busted"),
-								}
-							};
-							let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
-							let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
-							let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
-							let sig = secp_ctx.sign(&sighash, &secret.secret_key);
-							spend_tx.input[0].witness.push(sig.serialize_der(&secp_ctx).to_vec());
-							spend_tx.input[0].witness[0].push(SigHashType::All as u8);
-							spend_tx.input[0].witness.push(pubkey.serialize().to_vec());
-							spend_tx
-						},
-						_ => panic!("Unexpected event !"),
-					}
-				},
-				_ => panic!("Unexpected event !"),
-			};
-		}
-	}
-
 	#[test]
 	fn test_claim_sizeable_push_msat() {
 		// Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
@@ -7730,14 +8265,14 @@ mod tests {
 		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
-		let spend_txn = check_dynamic_output_p2wsh!(nodes[1]);
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
 		assert_eq!(spend_txn.len(), 1);
 		check_spends!(spend_txn[0], node_txn[0].clone());
 	}
 
 	#[test]
 	fn test_claim_on_remote_sizeable_push_msat() {
-		// Same test as precedent, just test on remote commitment tx, as per_commitment_point registration changes following you're funder/fundee and
+		// Same test as previous, just test on remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder/fundee and
 		// to_remote output is encumbered by a P2WPKH
 
 		let nodes = create_network(2);
@@ -7761,12 +8296,42 @@ mod tests {
 			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 			_ => panic!("Unexpected event"),
 		}
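+		// (check_spendable_outputs!'s second argument is the BIP32 hardened derivation index
+		// used to re-derive the key for StaticOutput descriptors.)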
-		let spend_txn = check_dynamic_output_p2wpkh!(nodes[1]);
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
 		assert_eq!(spend_txn.len(), 2);
 		assert_eq!(spend_txn[0], spend_txn[1]);
 		check_spends!(spend_txn[0], node_txn[0].clone());
 	}
 
+	#[test]
+	fn test_claim_on_remote_revoked_sizeable_push_msat() {
+		// Same test as previous, just test on remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder/fundee and
+		// to_remote output is encumbered by a P2WPKH
+
+		let nodes = create_network(2);
+
+		let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000);
+		let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+		let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+		assert_eq!(revoked_local_txn[0].input.len(), 1);
+		assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
+
+		claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 4);
+		assert_eq!(spend_txn[0], spend_txn[2]); // to_remote output on revoked remote commitment_tx
+		check_spends!(spend_txn[0], revoked_local_txn[0].clone());
+		assert_eq!(spend_txn[1], spend_txn[3]); // to_local output on local commitment tx
+		check_spends!(spend_txn[1], node_txn[0].clone());
+	}
+
 	#[test]
 	fn test_static_spendable_outputs_preimage_tx() {
 		let nodes = create_network(2);
@@ -7802,9 +8367,10 @@ mod tests {
 		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 133);
 		check_spends!(node_txn[1], chan_1.3.clone());
 
-		let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
-		let spend_tx = check_static_output!(events, nodes, 0, 0, 1, 1);
-		check_spends!(spend_tx, node_txn[0].clone());
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 2);
+		assert_eq!(spend_txn[0], spend_txn[1]);
+		check_spends!(spend_txn[0], node_txn[0].clone());
 	}
 
 	#[test]
@@ -7834,9 +8400,10 @@ mod tests {
 		assert_eq!(node_txn[0].input.len(), 2);
 		check_spends!(node_txn[0], revoked_local_txn[0].clone());
 
-		let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
-		let spend_tx = check_static_output!(events, nodes, 0, 0, 1, 1);
-		check_spends!(spend_tx, node_txn[0].clone());
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 2);
+		assert_eq!(spend_txn[0], spend_txn[1]);
+		check_spends!(spend_txn[0], node_txn[0].clone());
 	}
 
 	#[test]
@@ -7880,10 +8447,12 @@ mod tests {
 		assert_eq!(node_txn[3].input.len(), 1);
 		check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
 
-		let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
 		// Check B's ChannelMonitor was able to generate the right spendable output descriptor
-		let spend_tx = check_static_output!(events, nodes, 1, 1, 1, 1);
-		check_spends!(spend_tx, node_txn[3].clone());
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
+		assert_eq!(spend_txn.len(), 3);
+		assert_eq!(spend_txn[0], spend_txn[1]);
+		check_spends!(spend_txn[0], node_txn[0].clone());
+		check_spends!(spend_txn[2], node_txn[3].clone());
 	}
 
 	#[test]
@@ -7928,10 +8497,14 @@ mod tests {
 		assert_eq!(node_txn[3].input.len(), 1);
 		check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
 
-		let events = nodes[0].chan_monitor.simple_monitor.get_and_clear_pending_events();
 		// Check A's ChannelMonitor was able to generate the right spendable output descriptor
-		let spend_tx = check_static_output!(events, nodes, 1, 2, 1, 0);
-		check_spends!(spend_tx, node_txn[3].clone());
+		let spend_txn = check_spendable_outputs!(nodes[0], 1);
+		assert_eq!(spend_txn.len(), 5);
+		assert_eq!(spend_txn[0], spend_txn[2]);
+		assert_eq!(spend_txn[1], spend_txn[3]);
+		check_spends!(spend_txn[0], revoked_local_txn[0].clone()); // spending the to_remote output from the revoked local tx
+		check_spends!(spend_txn[1], node_txn[2].clone()); // spending the justice tx output from the revoked local tx's received-HTLC output
+		check_spends!(spend_txn[4], node_txn[3].clone()); // spending the justice tx output on the HTLC-success tx
 	}
 
 	#[test]
@@ -7966,7 +8539,7 @@ mod tests {
 		check_spends!(node_txn[0], local_txn[0].clone());
 
 		// Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
-		let spend_txn = check_dynamic_output_p2wsh!(nodes[1]);
+		let spend_txn = check_spendable_outputs!(nodes[1], 1);
 		assert_eq!(spend_txn.len(), 1);
 		check_spends!(spend_txn[0], node_txn[0].clone());
 	}
@@ -7997,7 +8570,7 @@ mod tests {
 		check_spends!(node_txn[0], local_txn[0].clone());
 
 		// Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
-		let spend_txn = check_dynamic_output_p2wsh!(nodes[0]);
+		let spend_txn = check_spendable_outputs!(nodes[0], 1);
 		assert_eq!(spend_txn.len(), 4);
 		assert_eq!(spend_txn[0], spend_txn[2]);
 		assert_eq!(spend_txn[1], spend_txn[3]);
@@ -8016,13 +8589,13 @@ mod tests {
 		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
-		let events = nodes[0].chan_monitor.simple_monitor.get_and_clear_pending_events();
-		let spend_tx = check_static_output!(events, nodes, 0, 0, 2, 0);
-		check_spends!(spend_tx, closing_tx.clone());
+		let spend_txn = check_spendable_outputs!(nodes[0], 2);
+		assert_eq!(spend_txn.len(), 1);
+		check_spends!(spend_txn[0], closing_tx.clone());
 
 		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
-		let events = nodes[1].chan_monitor.simple_monitor.get_and_clear_pending_events();
-		let spend_tx = check_static_output!(events, nodes, 0, 0, 2, 1);
-		check_spends!(spend_tx, closing_tx);
+		let spend_txn = check_spendable_outputs!(nodes[1], 2);
+		assert_eq!(spend_txn.len(), 1);
+		check_spends!(spend_txn[0], closing_tx);
 	}
 }