diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index ac4fa819..44dee8d4 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -133,9 +133,16 @@ mod channel_held_info {
 }
 pub(super) use self::channel_held_info::*;

+type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>);
+
+/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
+/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
+/// immediately (ie with no further calls on it made). Thus, this step happens inside a
+/// channel_state lock. We then return the set of things that need to be done outside the lock in
+/// this struct and call handle_error!() on it.
 struct MsgHandleErrInternal {
 	err: msgs::HandleError,
-	needs_channel_force_close: bool,
+	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
 	#[inline]
@@ -150,11 +157,15 @@ impl MsgHandleErrInternal {
 				},
 			}),
 		},
-			needs_channel_force_close: false,
+			shutdown_finish: None,
 		}
 	}
 	#[inline]
-	fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
+	fn from_no_close(err: msgs::HandleError) -> Self {
+		Self { err, shutdown_finish: None }
+	}
+	#[inline]
+	fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
 		Self {
 			err: HandleError {
 				err,
@@ -165,18 +176,10 @@ impl MsgHandleErrInternal {
 				},
 			}),
 		},
-			needs_channel_force_close: true,
+			shutdown_finish: Some((shutdown_res, channel_update)),
 		}
 	}
 	#[inline]
-	fn from_maybe_close(err: msgs::HandleError) -> Self {
-		Self { err, needs_channel_force_close: true }
-	}
-	#[inline]
-	fn from_no_close(err: msgs::HandleError) -> Self {
-		Self { err, needs_channel_force_close: false }
-	}
-	#[inline]
 	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
 		Self {
 			err: match err {
@@ -194,28 +197,7 @@ impl MsgHandleErrInternal {
 				}),
 			},
 		},
-			needs_channel_force_close: false,
-		}
-	}
-	#[inline]
-	fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
-		Self {
-			err: match err {
-				ChannelError::Ignore(msg) => HandleError {
-					err: msg,
-					action: Some(msgs::ErrorAction::IgnoreError),
-				},
-				ChannelError::Close(msg) => HandleError {
-					err: msg,
-					action: Some(msgs::ErrorAction::SendErrorMessage {
-						msg: msgs::ErrorMessage {
-							channel_id,
-							data: msg.to_string()
-						},
-					}),
-				},
-			},
-			needs_channel_force_close: true,
+			shutdown_finish: None,
 		}
 	}
 }
@@ -408,26 +390,14 @@ macro_rules! handle_error {
handle_error { ($self: ident, $internal: expr, $their_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, needs_channel_force_close }) => { - if needs_channel_force_close { - match &err.action { - &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {}, - &Some(msgs::ErrorAction::IgnoreError) => {}, - &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => { - if msg.channel_id == [0; 32] { - $self.peer_disconnected(&$their_node_id, true); - } else { - $self.force_close_channel(&msg.channel_id); - } - }, - &None => {}, + Err(MsgHandleErrInternal { err, shutdown_finish }) => { + if let Some((shutdown_res, update_option)) = shutdown_finish { + $self.finish_force_close_channel(shutdown_res); + if let Some(update) = update_option { + let mut channel_state = $self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); } } Err(err) @@ -436,6 +406,97 @@ macro_rules! handle_error { } } +macro_rules! break_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! try_chan_entry { + ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(ChannelError::Ignore(msg)) => { + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone())) + }, + Err(ChannelError::Close(msg)) => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + } + } +} + +macro_rules! 
return_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new()) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => { + if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); } + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => { + return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false) + }; + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + // TODO: $failed_fails is dropped here, which will cause other channels to hit the + // chain in a confused state! We need to move them into the ChannelMonitor which + // will be responsible for failing backwards once things confirm on-chain. + // It's ok that we drop $failed_forwards here - at this point we'd rather they + // broadcast HTLC-Timeout and pay the associated fees to get their funds back than + // us bother trying to claim it just to forward on to another peer. If we're + // splitting hairs we'd prefer to claim payments that were to us, but we haven't + // given up the preimage yet, so might as well just wait until the payment is + // retried, avoiding the on-chain fees. + return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs); + return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())); + }, + } + } +} + +// Does not break in case of TemporaryFailure! +macro_rules! maybe_break_monitor_err { + ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => { + match $err { + ChannelMonitorUpdateErr::PermanentFailure => { + let (channel_id, mut chan) = $entry.remove_entry(); + if let Some(short_id) = chan.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + }, + ChannelMonitorUpdateErr::TemporaryFailure => { + $entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false); + }, + } + } +} + impl ChannelManager { /// Constructs a new ChannelManager to hold several channels and route between them. /// @@ -609,7 +670,7 @@ impl ChannelManager { } #[inline] - fn finish_force_close_channel(&self, shutdown_res: (Vec, Vec<(HTLCSource, [u8; 32])>)) { + fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (local_txn, mut failed_htlcs) = shutdown_res; for htlc_source in failed_htlcs.drain(..) 
 			// unknown_next_peer...I dunno who that is anymore....
@@ -661,33 +722,6 @@ impl ChannelManager {
 		}
 	}

-	fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
-		match err {
-			ChannelMonitorUpdateErr::PermanentFailure => {
-				let mut chan = {
-					let channel_state = channel_state_lock.borrow_parts();
-					let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-					if let Some(short_id) = chan.get_short_channel_id() {
-						channel_state.short_to_id.remove(&short_id);
-					}
-					chan
-				};
-				mem::drop(channel_state_lock);
-				self.finish_force_close_channel(chan.force_shutdown());
-				if let Ok(update) = self.get_channel_update(&chan) {
-					let mut channel_state = self.channel_state.lock().unwrap();
-					channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-						msg: update
-					});
-				}
-			},
-			ChannelMonitorUpdateErr::TemporaryFailure => {
-				let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
-				channel.monitor_update_failed(reason);
-			},
-		}
-	}
-
 	#[inline]
 	fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
 		assert_eq!(shared_secret.len(), 32);
@@ -1187,7 +1221,17 @@ impl ChannelManager {
 	/// May generate a SendHTLCs message event on success, which should be relayed.
 	///
 	/// Raises APIError::RouteError when invalid route or forward parameter
-	/// (cltv_delta, fee, node public key) is specified
+	/// (cltv_delta, fee, node public key) is specified.
+	/// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
+	/// (including due to previous monitor update failure or new permanent monitor update failure).
+	/// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+	/// relevant updates.
+	///
+	/// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
+	/// and you may wish to retry via a different route immediately.
+	/// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
+	/// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
+	/// the payment via a different route unless you intend to pay twice!
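	/// A minimal sketch of the caller-side handling this contract implies (hypothetical caller
	/// code, not part of this patch; `channel_manager` and `maybe_retry_other_route` are
	/// placeholder names):
	///
	///     match channel_manager.send_payment(route, payment_hash) {
	///         Ok(()) => {}, // payment in flight; await PaymentSent/PaymentFailed events
	///         Err(APIError::RouteError { .. }) |
	///         Err(APIError::ChannelUnavailable { .. }) => maybe_retry_other_route(payment_hash),
	///         // Irrevocably committed: retrying via another route risks paying twice, so just wait.
	///         Err(APIError::MonitorUpdateFailed) => {},
	///         Err(_) => {}, // other API misuse
	///     }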
 	pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
 		if route.hops.len() < 1 || route.hops.len() > 20 {
 			return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
 		}
@@ -1209,63 +1253,73 @@ impl ChannelManager {
 		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);

 		let _ = self.total_consistency_lock.read().unwrap();
-		let mut channel_state = self.channel_state.lock().unwrap();

-		let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
-			None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
-			Some(id) => id.clone(),
-		};
+		let err: Result<(), _> = loop {
+			let mut channel_lock = self.channel_state.lock().unwrap();

-		let res = {
-			let chan = channel_state.by_id.get_mut(&id).unwrap();
-			if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
-				return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
-			}
-			if chan.is_awaiting_monitor_update() {
-				return Err(APIError::MonitorUpdateFailed);
-			}
-			if !chan.is_live() {
-				return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
-			}
-			chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-				route: route.clone(),
-				session_priv: session_priv.clone(),
-				first_hop_htlc_msat: htlc_msat,
-			}, onion_packet).map_err(|he|
-				match he {
-					ChannelError::Close(err) => {
-						// TODO: We need to close the channel here, but for that to be safe we have
-						// to do all channel closure inside the channel_state lock which is a
-						// somewhat-larger refactor, so we leave that for later.
-						APIError::ChannelUnavailable { err }
+			let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
+				None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+				Some(id) => id.clone(),
+			};
+
+			let channel_state = channel_lock.borrow_parts();
+			if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+				match {
+					if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
+						return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+					}
+					if !chan.get().is_live() {
+						return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
+					}
+					break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+						route: route.clone(),
+						session_priv: session_priv.clone(),
+						first_hop_htlc_msat: htlc_msat,
+					}, onion_packet), channel_state, chan)
+				} {
+					Some((update_add, commitment_signed, chan_monitor)) => {
+						if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+							maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst);
+							// Note that MonitorUpdateFailed here indicates (per function docs)
+							// that we will resend the commitment update once we unfree monitor
+							// updating, so we have to take special care that we don't return
+							// something else in case we will resend later!
+							return Err(APIError::MonitorUpdateFailed);
+						}
+
+						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+							node_id: route.hops.first().unwrap().pubkey,
+							updates: msgs::CommitmentUpdate {
+								update_add_htlcs: vec![update_add],
+								update_fulfill_htlcs: Vec::new(),
+								update_fail_htlcs: Vec::new(),
+								update_fail_malformed_htlcs: Vec::new(),
+								update_fee: None,
+								commitment_signed,
+							},
+						});
 					},
-					ChannelError::Ignore(err) => APIError::ChannelUnavailable { err },
+					None => {},
 				}
-			)?
+			} else { unreachable!(); }
+			return Ok(());
 		};

-		match res {
-			Some((update_add, commitment_signed, chan_monitor)) => {
-				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-					self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst);
-					return Err(APIError::MonitorUpdateFailed);
-				}
-
-				channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-					node_id: route.hops.first().unwrap().pubkey,
-					updates: msgs::CommitmentUpdate {
-						update_add_htlcs: vec![update_add],
-						update_fulfill_htlcs: Vec::new(),
-						update_fail_htlcs: Vec::new(),
-						update_fail_malformed_htlcs: Vec::new(),
-						update_fee: None,
-						commitment_signed,
-					},
-				});
+		match handle_error!(self, err, route.hops.first().unwrap().pubkey) {
+			Ok(_) => unreachable!(),
+			Err(e) => {
+				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+				} else {
+					log_error!(self, "Got bad keys: {}!", e.err);
+					let mut channel_state = self.channel_state.lock().unwrap();
+					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+						node_id: route.hops.first().unwrap().pubkey,
+						action: e.action,
+					});
+				}
+				Err(APIError::ChannelUnavailable { err: e.err })
 			},
-			None => {},
 		}
-
-		Ok(())
 	}

 	/// Call this upon creation of a funding transaction for the given channel.
@@ -1286,7 +1340,9 @@ impl ChannelManager {
 		match channel_state.by_id.remove(temporary_channel_id) {
 			Some(mut chan) => {
 				(chan.get_outbound_funding_created(funding_txo)
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, chan.channel_id()))
+					.map_err(|e| if let ChannelError::Close(msg) = e {
+						MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+					} else { unreachable!(); })
 				, chan)
 			},
 			None => return
@@ -1424,7 +1480,7 @@ impl ChannelManager {
 							},
 						};
 						if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-							unimplemented!();// but def dont push the event...
+							unimplemented!();
 						}
 						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 							node_id: forward_chan.get_their_node_id(),
@@ -1662,6 +1718,13 @@ impl ChannelManager {
 				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 					match e {
 						ChannelMonitorUpdateErr::PermanentFailure => {
+							// TODO: There may be some pending HTLCs that we intended to fail
+							// backwards when a monitor update failed. We should make sure
+							// knowledge of those gets moved into the appropriate in-memory
+							// ChannelMonitor and they get failed backwards once we get
+							// on-chain confirmations.
+							// Note I think #198 addresses this, so once it's merged a test
+							// should be written.
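							// For reference, the two failure modes matched on here come from the
							// ChannelMonitorUpdateErr enum in ln::channelmonitor (doc comments
							// paraphrased here, not quoted from this patch):
							//
							//     pub enum ChannelMonitorUpdateErr {
							//         /// The update failed but may succeed later; the channel is
							//         /// frozen (no new commitment updates) until the monitor is
							//         /// restored, e.g. via test_restore_channel_monitor below.
							//         TemporaryFailure,
							//         /// The update can never be applied; the only safe response
							//         /// is to force-close the channel, as this arm does.
							//         PermanentFailure,
							//     }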
 							if let Some(short_id) = channel.get_short_channel_id() {
 								short_to_id.remove(&short_id);
 							}
@@ -1748,19 +1811,19 @@ impl ChannelManager {

 	fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
 		let (value, output_script, user_id) = {
-			let mut channel_state = self.channel_state.lock().unwrap();
-			match channel_state.by_id.get_mut(&msg.temporary_channel_id) {
-				Some(chan) => {
-					if chan.get_their_node_id() != *their_node_id {
+			let mut channel_lock = self.channel_state.lock().unwrap();
+			let channel_state = channel_lock.borrow_parts();
+			match channel_state.by_id.entry(msg.temporary_channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					if chan.get().get_their_node_id() != *their_node_id {
 						//TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
 					}
-					chan.accept_channel(&msg, &self.default_configuration)
-						.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
-					(chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
+					try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan);
+					(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
 				},
 				//TODO: same as above
-				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
 			}
 		};
 		let mut pending_events = self.pending_events.lock().unwrap();
@@ -1774,22 +1837,16 @@ impl ChannelManager {
 	}

 	fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-		let (chan, funding_msg, monitor_update) = {
-			let mut channel_state = self.channel_state.lock().unwrap();
+		let ((funding_msg, monitor_update), chan) = {
+			let mut channel_lock = self.channel_state.lock().unwrap();
+			let channel_state = channel_lock.borrow_parts();
 			match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan) => {
 					if chan.get().get_their_node_id() != *their_node_id {
 						//TODO: here and below MsgHandleErrInternal, #153 case
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
 					}
-					match chan.get_mut().funding_created(msg) {
-						Ok((funding_msg, monitor_update)) => {
-							(chan.remove(), funding_msg, monitor_update)
-						},
-						Err(e) => {
-							return Err(e).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))
-						}
-					}
+					(try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove())
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
 			}
@@ -1818,20 +1875,21 @@ impl ChannelManager {

 	fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 		let (funding_txo, user_id) = {
-			let mut channel_state = self.channel_state.lock().unwrap();
-			match channel_state.by_id.get_mut(&msg.channel_id) {
-				Some(chan) => {
-					if chan.get_their_node_id() != *their_node_id {
+			let mut channel_lock = self.channel_state.lock().unwrap();
+			let channel_state = channel_lock.borrow_parts();
+			match channel_state.by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					if chan.get().get_their_node_id() != *their_node_id {
 						//TODO: here and below MsgHandleErrInternal, #153 case
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 					}
-					let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
+					let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
 					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
 						unimplemented!();
 					}
-					(chan.get_funding_txo().unwrap(), chan.get_user_id())
+					(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
 				},
-				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
 		};
 		let mut pending_events = self.pending_events.lock().unwrap();
@@ -1845,15 +1903,14 @@ impl ChannelManager {
 	fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				chan.funding_locked(&msg)
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
-				if let Some(announcement_sigs) = self.get_announcement_sigs(chan) {
+				try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
+				if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
 						node_id: their_node_id.clone(),
 						msg: announcement_sigs,
@@ -1861,7 +1918,7 @@ impl ChannelManager {
 				}
 				Ok(())
 			},
-			None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 	}

@@ -1876,7 +1933,7 @@ impl ChannelManager {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
+				let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry);
 				if let Some(msg) = shutdown {
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 						node_id: their_node_id.clone(),
@@ -1924,7 +1981,7 @@ impl ChannelManager {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				let (closing_signed, tx) = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+				let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry);
 				if let Some(msg) = closing_signed {
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 						node_id: their_node_id.clone(),
@@ -1973,18 +2030,18 @@ impl ChannelManager {
 		let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
 		let channel_state = channel_state_lock.borrow_parts();

-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					//TODO: here MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				if !chan.is_usable() {
+				if !chan.get().is_usable() {
 					// If the update_add is completely bogus, the call will Err and we will close,
 					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
 					// want to reject the new HTLC and fail it backwards instead of forwarding.
 					if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
-						let chan_update = self.get_channel_update(chan);
+						let chan_update = self.get_channel_update(chan.get());
 						pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 							channel_id: msg.channel_id,
 							htlc_id: msg.htlc_id,
@@ -2001,26 +2058,29 @@ impl ChannelManager {
 						}));
 					}
 				}
-				chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
+				try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan);
 			},
-			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
+		Ok(())
 	}

 	fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state = self.channel_state.lock().unwrap();
-		let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
-					//TODO: here and below MsgHandleErrInternal, #153 case
-					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
-				}
-				chan.update_fulfill_htlc(&msg)
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone()
-			},
-			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+		let mut channel_lock = self.channel_state.lock().unwrap();
+		let htlc_source = {
+			let channel_state = channel_lock.borrow_parts();
+			match channel_state.by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					if chan.get().get_their_node_id() != *their_node_id {
+						//TODO: here and below MsgHandleErrInternal, #153 case
+						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+					}
+					try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
+				},
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			}
 		};
-		self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone());
+		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
 		Ok(())
 	}

@@ -2222,53 +2282,54 @@ impl ChannelManager {
 	}

 	fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state = self.channel_state.lock().unwrap();
-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		let mut channel_lock = self.channel_state.lock().unwrap();
+		let channel_state = channel_lock.borrow_parts();
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
+				try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan);
 			},
-			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
-		}?;
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+		}
 		Ok(())
 	}

 	fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state = self.channel_state.lock().unwrap();
-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		let mut channel_lock = self.channel_state.lock().unwrap();
+		let channel_state = channel_lock.borrow_parts();
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
 				if (msg.failure_code & 0x8000) == 0 {
-					return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id));
+					try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan);
 				}
-				chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
+				try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
 				Ok(())
 			},
-			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 	}

 	fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator)
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
-				if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-					unimplemented!();
+				let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
+					try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
+				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some());
+					//TODO: Rebroadcast closing_signed if present on monitor update restoration
 				}
 				channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
 					node_id: their_node_id.clone(),
@@ -2295,7 +2356,7 @@ impl ChannelManager {
 				}
 				Ok(())
 			},
-			None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 	}

@@ -2336,16 +2397,16 @@ impl ChannelManager {
 		let (pending_forwards, mut pending_failures, short_channel_id) = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();
-			match channel_state.by_id.get_mut(&msg.channel_id) {
-				Some(chan) => {
-					if chan.get_their_node_id() != *their_node_id {
+			match channel_state.by_id.entry(msg.channel_id) {
+				hash_map::Entry::Occupied(mut chan) => {
+					if chan.get().get_their_node_id() != *their_node_id {
 						//TODO: here and below MsgHandleErrInternal, #153 case
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 					}
-					let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator)
-						.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
-					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-						unimplemented!();
+					let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
+						try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
+					if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+						return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures);
 					}
 					if let Some(updates) = commitment_update {
 						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2359,9 +2420,9 @@ impl ChannelManager {
 							msg,
 						});
 					}
-					(pending_forwards, pending_failures, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+					(pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
 				},
-				None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 			}
 		};
 		for failure in pending_failures.drain(..) {
@@ -2373,41 +2434,44 @@ impl ChannelManager {
 	}

 	fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
-		let mut channel_state = self.channel_state.lock().unwrap();
-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		let mut channel_lock = self.channel_state.lock().unwrap();
+		let channel_state = channel_lock.borrow_parts();
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
+				try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan);
 			},
-			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
+		Ok(())
 	}

 	fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();

-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				if !chan.is_usable() {
+				if !chan.get().is_usable() {
 					return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
 				}

 				let our_node_id = self.get_our_node_id();
-				let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
+				let (announcement, our_bitcoin_sig) =
+					try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);

 				let were_node_one = announcement.node_id_1 == our_node_id;
 				let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
-				let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id);
-				secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action);
-				secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action);
+				if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
+						self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
+					try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
+				}

 				let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
@@ -2419,10 +2483,10 @@ impl ChannelManager {
 						bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
 						contents: announcement,
 					},
-					update_msg: self.get_channel_update(chan).unwrap(), // can only fail if we're not in a ready state
+					update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
 				});
 			},
-			None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 		Ok(())
 	}

@@ -2431,16 +2495,26 @@ impl ChannelManager {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();

-		match channel_state.by_id.get_mut(&msg.channel_id) {
-			Some(chan) => {
-				if chan.get_their_node_id() != *their_node_id {
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_their_node_id() != *their_node_id {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg)
-					.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
+				let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) =
+					try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan);
 				if let Some(monitor) = channel_monitor {
-					if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-						unimplemented!();
+					if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+						// channel_reestablish doesn't guarantee that the order of the messages it
+						// returns is sensible, but if we're setting what messages to re-transmit
+						// on monitor update success, we need to make sure it is sane.
+						if revoke_and_ack.is_none() {
+							order = RAACommitmentOrder::CommitmentFirst;
+						}
+						if commitment_update.is_none() {
+							order = RAACommitmentOrder::RevokeAndACKFirst;
+						}
+						return_monitor_err!(self, e, channel_state, chan, order);
+						//TODO: Resend the funding_locked if needed once we get the monitor running again
 					}
 				}
 				if let Some(msg) = funding_locked {
@@ -2483,7 +2557,7 @@ impl ChannelManager {
 				}
 				Ok(())
 			},
-			None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
 	}

@@ -2494,49 +2568,62 @@ impl ChannelManager {
 	#[doc(hidden)]
 	pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
 		let _ = self.total_consistency_lock.read().unwrap();
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
-		let channel_state = channel_state_lock.borrow_parts();
+		let their_node_id;
+		let err: Result<(), _> = loop {
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
+			let channel_state = channel_state_lock.borrow_parts();

-		match channel_state.by_id.get_mut(&channel_id) {
-			None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
-			Some(chan) => {
-				if !chan.is_outbound() {
-					return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
-				}
-				if chan.is_awaiting_monitor_update() {
-					return Err(APIError::MonitorUpdateFailed);
-				}
-				if !chan.is_live() {
-					return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
-				}
-				if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw)
-					.map_err(|e| match e {
-						ChannelError::Ignore(err) => APIError::APIMisuseError{err},
-						ChannelError::Close(err) => {
-							// TODO: We need to close the channel here, but for that to be safe we have
-							// to do all channel closure inside the channel_state lock which is a
-							// somewhat-larger refactor, so we leave that for later.
-							APIError::APIMisuseError{err}
+			match channel_state.by_id.entry(channel_id) {
+				hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
+				hash_map::Entry::Occupied(mut chan) => {
+					if !chan.get().is_outbound() {
+						return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
+					}
+					if chan.get().is_awaiting_monitor_update() {
+						return Err(APIError::MonitorUpdateFailed);
+					}
+					if !chan.get().is_live() {
+						return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
+					}
+					their_node_id = chan.get().get_their_node_id();
+					if let Some((update_fee, commitment_signed, chan_monitor)) =
+							break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan)
+					{
+						if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+							unimplemented!();
+						}
+						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+							node_id: chan.get().get_their_node_id(),
+							updates: msgs::CommitmentUpdate {
+								update_add_htlcs: Vec::new(),
+								update_fulfill_htlcs: Vec::new(),
+								update_fail_htlcs: Vec::new(),
+								update_fail_malformed_htlcs: Vec::new(),
+								update_fee: Some(update_fee),
+								commitment_signed,
 							},
-						})?
-				{
-					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-						unimplemented!();
+						});
+					}
-					channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-						node_id: chan.get_their_node_id(),
-						updates: msgs::CommitmentUpdate {
-							update_add_htlcs: Vec::new(),
-							update_fulfill_htlcs: Vec::new(),
-							update_fail_htlcs: Vec::new(),
-							update_fail_malformed_htlcs: Vec::new(),
-							update_fee: Some(update_fee),
-							commitment_signed,
-						},
+				},
+			}
+			return Ok(())
+		};
+
+		match handle_error!(self, err, their_node_id) {
+			Ok(_) => unreachable!(),
+			Err(e) => {
+				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+				} else {
+					log_error!(self, "Got bad keys: {}!", e.err);
+					let mut channel_state = self.channel_state.lock().unwrap();
+					channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+						node_id: their_node_id,
+						action: e.action,
 					});
 				}
+				Err(APIError::APIMisuseError { err: e.err })
 			},
 		}
-		Ok(())
 	}
 }

@@ -2584,11 +2671,9 @@ impl ChainListener for ChannelManager {
 			} else if let Err(e) = chan_res {
 				pending_msg_events.push(events::MessageSendEvent::HandleError {
 					node_id: channel.get_their_node_id(),
-					action: e.action,
+					action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }),
 				});
-				if channel.is_shutdown() {
-					return false;
-				}
+				return false;
 			}
 			if let Some(funding_txo) = channel.get_funding_txo() {
 				for tx in txn_matched {
@@ -3801,6 +3886,12 @@ mod tests {
 				_ => panic!("Unexpected event type!"),
 			}
 		}
+
+		fn from_node(node: &Node) -> SendEvent {
+			let mut events = node.node.get_and_clear_pending_msg_events();
+			assert_eq!(events.len(), 1);
+			SendEvent::from_event(events.pop().unwrap())
+		}
 	}

 	macro_rules! check_added_monitors {
@@ -3823,7 +3914,7 @@ mod tests {
 				commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
 			}
 		};
-		($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
+		($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => {
 			{
 				let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
 				check_added_monitors!($node_b, 0);
@@ -3848,6 +3939,23 @@ mod tests {
 				assert!($node_a.node.get_and_clear_pending_events().is_empty());
 				assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
 			}
+			(extra_msg_option, bs_revoke_and_ack)
+		}
+	};
+	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
+		{
+			check_added_monitors!($node_a, 0);
+			assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+			$node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
+			check_added_monitors!($node_a, 1);
+			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
+			assert!(extra_msg_option.is_none());
+			bs_revoke_and_ack
+		}
+	};
+	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
+		{
+			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
 			$node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
 			{
 				let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
@@ -5365,8 +5473,7 @@ mod tests {
 		}}
 	}

-	#[test]
-	fn channel_reserve_test() {
+	fn do_channel_reserve_test(test_recv: bool) {
 		use util::rng;
 		use std::sync::atomic::Ordering;
 		use ln::msgs::HandleError;
@@ -5523,9 +5630,23 @@ mod tests {
 			onion_routing_packet: onion_packet,
 		};

-		let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
-		match err {
-			HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+		if test_recv {
+			let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+			match err {
+				HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+			}
+			// If we send a garbage message, the channel should get closed, making the rest of this test case fail.
+			assert_eq!(nodes[1].node.list_channels().len(), 1);
+			let channel_close_broadcast = nodes[1].node.get_and_clear_pending_msg_events();
+			assert_eq!(channel_close_broadcast.len(), 1);
+			match channel_close_broadcast[0] {
+				MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+					assert_eq!(msg.contents.flags & 2, 2);
+				},
+				_ => panic!("Unexpected event"),
+			}
+			return;
 		}
 	}

@@ -5633,6 +5754,12 @@ mod tests {
 		assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
 	}

+	#[test]
+	fn channel_reserve_test() {
+		do_channel_reserve_test(false);
+		do_channel_reserve_test(true);
+	}
+
 	#[test]
 	fn channel_monitor_network_test() {
 		// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -6268,6 +6395,31 @@ mod tests {
 		node_b.node.peer_connected(&node_a.node.get_our_node_id());
 		let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);

+		if send_funding_locked.0 {
+			// If a expects a funding_locked, it better not think it has received a revoke_and_ack
+			// from b
+			for reestablish in reestablish_1.iter() {
+				assert_eq!(reestablish.next_remote_commitment_number, 0);
+			}
+		}
+		if send_funding_locked.1 {
+			// If b expects a funding_locked, it better not think it has received a revoke_and_ack
+			// from a
+			for reestablish in reestablish_2.iter() {
+				assert_eq!(reestablish.next_remote_commitment_number, 0);
+			}
+		}
+		if send_funding_locked.0 || send_funding_locked.1 {
+			// If we expect any funding_locked's, both sides better have set
+			// next_local_commitment_number to 1
+			for reestablish in reestablish_1.iter() {
+				assert_eq!(reestablish.next_local_commitment_number, 1);
+			}
+			for reestablish in reestablish_2.iter() {
+				assert_eq!(reestablish.next_local_commitment_number, 1);
+			}
+		}
+
 		let mut resp_1 = Vec::new();
 		for msg in reestablish_1 {
 			node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap();
@@ -6874,15 +7026,19 @@ mod tests {
 		let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);

 		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
-		if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
+		if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
 		check_added_monitors!(nodes[0], 1);

 		let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
-		assert_eq!(events_1.len(), 1);
+		assert_eq!(events_1.len(), 2);
 		match events_1[0] {
 			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 			_ => panic!("Unexpected event"),
 		};
+		match events_1[1] {
+			MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
+			_ => panic!("Unexpected event"),
+		};

 		// TODO: Once we hit the chain with the failure transaction we should check that we get a
 		// PaymentFailed event
@@ -7321,6 +7477,436 @@ mod tests {
 		do_test_monitor_temporary_update_fail(3 | 8 | 16);
 	}

+	#[test]
+	fn test_monitor_update_fail_cs() {
+		// Tests handling of a monitor update failure when processing an incoming commitment_signed
+		let mut nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+		nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+			assert_eq!(err, "Failed to update ChannelMonitor");
+		} else { panic!(); }
+		check_added_monitors!(nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[1].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[1], 1);
+		let responses = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(responses.len(), 2);
+
+		match responses[0] {
+			MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap();
+				check_added_monitors!(nodes[0], 1);
+			},
+			_ => panic!("Unexpected event"),
+		}
+		match responses[1] {
+			MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fulfill_htlcs.is_empty());
+				assert!(updates.update_fail_htlcs.is_empty());
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+
+				*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+				if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+					assert_eq!(err, "Failed to update ChannelMonitor");
+				} else { panic!(); }
+				check_added_monitors!(nodes[0], 1);
+				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+			},
+			_ => panic!("Unexpected event"),
+		}
+
+		*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[0].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[0], 1);
+
+		let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
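		// (Delivering this final RAA completes the commitment_signed/revoke_and_ack exchange in
		// both directions; the HTLC is now irrevocably committed on both sides, so nodes[1] can
		// surface the forwardable HTLC and the received payment, as checked below.)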
+		check_added_monitors!(nodes[1], 1);
+
+		let mut events = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[1].node.process_pending_htlc_forwards();
+
+		events = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentReceived { payment_hash, amt } => {
+				assert_eq!(payment_hash, our_payment_hash);
+				assert_eq!(amt, 1000000);
+			},
+			_ => panic!("Unexpected event"),
+		};
+
+		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+	}
+
+	fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
+		// Tests handling of a monitor update failure when processing an incoming RAA
+		let mut nodes = create_network(3);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		// Rebalance a bit so that we can send backwards from 2 to 1.
+		send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+		// Route a first payment that we'll fail backwards
+		let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+		// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
+		assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, PaymentFailReason::PreimageUnknown));
+		check_added_monitors!(nodes[2], 1);
+
+		let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+
+		let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
+		check_added_monitors!(nodes[0], 0);
+
+		// While the second channel is AwaitingRAA, forward a second payment to get it into the
+		// holding cell.
+		let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+		let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
+
+		let events_1 = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events_1.len(), 1);
+		match events_1[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+
+		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[1].node.process_pending_htlc_forwards();
+		check_added_monitors!(nodes[1], 0);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		// Now fail monitor updating.
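		// (The `update_ret` slot written below is the test harness's override: the test
		// ChannelMonitor shim forwards each add_update_monitor call to a real monitor and then
		// returns whatever Result is stored here. Roughly, a sketch assuming the
		// util::test_utils shim, not code from this patch:
		//
		//     fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor)
		//             -> Result<(), ChannelMonitorUpdateErr> {
		//         self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
		//         self.simple_monitor.add_update_monitor(funding_txo, monitor)?;
		//         self.update_ret.lock().unwrap().clone()
		//     }
		// )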
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+			assert_eq!(err, "Failed to update ChannelMonitor");
+		} else { panic!(); }
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		check_added_monitors!(nodes[1], 1);
+
+		// Attempt to forward a third payment, which fails back because the second channel is
+		// currently unavailable for forwarding.
+		let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+		let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+		nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+		send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+		commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+		check_added_monitors!(nodes[1], 0);
+
+		let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events_2.len(), 1);
+		match events_2.remove(0) {
+			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+				assert_eq!(node_id, nodes[0].node.get_our_node_id());
+				assert!(updates.update_fulfill_htlcs.is_empty());
+				assert_eq!(updates.update_fail_htlcs.len(), 1);
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+
+				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+				commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+				let events = nodes[0].node.get_and_clear_pending_events();
+				assert_eq!(events.len(), 1);
+				if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events[0] {
+					assert_eq!(payment_hash, payment_hash_3);
+					assert!(!rejected_by_dest);
+				} else { panic!("Unexpected event!"); }
+			},
+			_ => panic!("Unexpected event type!"),
+		};
+
+		let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
+			// Try to route another payment backwards from 2 to make sure 1 holds off on responding
+			let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
+			let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+			nodes[2].node.send_payment(route, payment_hash_4).unwrap();
+			check_added_monitors!(nodes[2], 1);
+
+			send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
+			nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+			if let Err(msgs::HandleError{ err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+				assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+			} else { panic!(); }
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+			assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+			(Some(payment_preimage_4), Some(payment_hash_4))
+		} else { (None, None) };
+
+		// Restore monitor updating, ensuring we immediately get a fail-back update and an
+		// update_add update.
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[1].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[1], 2);
+
+		let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
+		if test_ignore_second_cs {
+			assert_eq!(events_3.len(), 3);
+		} else {
+			assert_eq!(events_3.len(), 2);
+		}
+
+		// Note that the ordering of events across different nodes is non-prescriptive, though
+		// the two events that both go to nodes[2] have to stay in the same relative order.
+		let messages_a = match events_3.pop().unwrap() {
+			MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
+				assert_eq!(node_id, nodes[0].node.get_our_node_id());
+				assert!(updates.update_fulfill_htlcs.is_empty());
+				assert_eq!(updates.update_fail_htlcs.len(), 1);
+				assert!(updates.update_fail_malformed_htlcs.is_empty());
+				assert!(updates.update_add_htlcs.is_empty());
+				assert!(updates.update_fee.is_none());
+				(updates.update_fail_htlcs.remove(0), updates.commitment_signed)
+			},
+			_ => panic!("Unexpected event type!"),
+		};
+		let raa = if test_ignore_second_cs {
+			match events_3.remove(1) {
+				MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
+					assert_eq!(node_id, nodes[2].node.get_our_node_id());
+					Some(msg.clone())
+				},
+				_ => panic!("Unexpected event"),
+			}
+		} else { None };
+		let send_event_b = SendEvent::from_event(events_3.remove(0));
+		assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
+
+		// Now deliver the new messages...
+
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap();
+		commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
+		let events_4 = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events_4.len(), 1);
+		if let Event::PaymentFailed { payment_hash, rejected_by_dest } = events_4[0] {
+			assert_eq!(payment_hash, payment_hash_1);
+			assert!(rejected_by_dest);
+		} else { panic!("Unexpected event!"); }
+
+		nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap();
+		if test_ignore_second_cs {
+			nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+			nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+			assert!(bs_cs.update_add_htlcs.is_empty());
+			assert!(bs_cs.update_fail_htlcs.is_empty());
+			assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
+			assert!(bs_cs.update_fulfill_htlcs.is_empty());
+			assert!(bs_cs.update_fee.is_none());
+
+			nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+			check_added_monitors!(nodes[1], 1);
+			let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+			assert!(as_cs.update_add_htlcs.is_empty());
+			assert!(as_cs.update_fail_htlcs.is_empty());
+			assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+			assert!(as_cs.update_fulfill_htlcs.is_empty());
+			assert!(as_cs.update_fee.is_none());
+
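+			// bs_cs and as_cs crossed in flight: each side must still receive the other's
+			// commitment_signed and respond with a final revoke_and_ack before both channels
+			// are quiescent again (checked via the empty pending_msg_events asserts below).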
+			nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+			check_added_monitors!(nodes[1], 1);
+			let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+			nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+			nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+			check_added_monitors!(nodes[2], 1);
+			assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+			nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap();
+			check_added_monitors!(nodes[1], 1);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		} else {
+			commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+		}
+
+		let events_5 = nodes[2].node.get_and_clear_pending_events();
+		assert_eq!(events_5.len(), 1);
+		match events_5[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+
+		nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now();
+		nodes[2].node.process_pending_htlc_forwards();
+
+		let events_6 = nodes[2].node.get_and_clear_pending_events();
+		assert_eq!(events_6.len(), 1);
+		match events_6[0] {
+			Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
+			_ => panic!("Unexpected event"),
+		};
+
+		if test_ignore_second_cs {
+			let events_7 = nodes[1].node.get_and_clear_pending_events();
+			assert_eq!(events_7.len(), 1);
+			match events_7[0] {
+				Event::PendingHTLCsForwardable { .. } => { },
+				_ => panic!("Unexpected event"),
+			};
+
+			nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+			nodes[1].node.process_pending_htlc_forwards();
+			check_added_monitors!(nodes[1], 1);
+
+			send_event = SendEvent::from_node(&nodes[1]);
+			assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
+			assert_eq!(send_event.msgs.len(), 1);
+			nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+			commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
+
+			let events_8 = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events_8.len(), 1);
+			match events_8[0] {
+				Event::PendingHTLCsForwardable { .. } => { },
+				_ => panic!("Unexpected event"),
+			};
+
+			nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
+			nodes[0].node.process_pending_htlc_forwards();
+
+			let events_9 = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events_9.len(), 1);
+			match events_9[0] {
+				Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
+				_ => panic!("Unexpected event"),
+			};
+			claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
+		}
+
+		claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+	}
+
+	#[test]
+	fn test_monitor_update_fail_raa() {
+		do_test_monitor_update_fail_raa(false);
+		do_test_monitor_update_fail_raa(true);
+	}
+
+	#[test]
+	fn test_monitor_update_fail_reestablish() {
+		// Simple test of message retransmission after a monitor update failure on a
+		// channel_reestablish which generates a monitor update (from freeing the holding-cell
+		// HTLCs).
+		let mut nodes = create_network(3);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+		assert!(nodes[2].node.claim_funds(our_payment_preimage));
+		check_added_monitors!(nodes[2], 1);
+		let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fail_htlcs.is_empty());
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+		nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+		check_added_monitors!(nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+		let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+		let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+		if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+			assert_eq!(err, "Failed to update ChannelMonitor");
+		} else { panic!(); }
+		check_added_monitors!(nodes[1], 1);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+		assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
+		assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();
+		check_added_monitors!(nodes[1], 0);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+		nodes[1].node.test_restore_channel_monitor();
+		check_added_monitors!(nodes[1], 1);
+
+		updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fail_htlcs.is_empty());
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+		let events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
+			_ => panic!("Unexpected event"),
+		}
+	}
+
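+	// Editorial sketch, not part of the original commit: the reconnect handshake performed
+	// twice in the test above, as a hypothetical helper. It assumes the `get_event_msg!`
+	// macro and the test harness' `Node` struct from this module.
+	#[allow(dead_code)]
+	fn reconnect_and_fetch_reestablish(a: &Node, b: &Node) -> (msgs::ChannelReestablish, msgs::ChannelReestablish) {
+		// Each side queues a channel_reestablish for the peer on reconnect; fetch both so the
+		// caller can deliver them (possibly after forcing a monitor update failure first).
+		a.node.peer_connected(&b.node.get_our_node_id());
+		b.node.peer_connected(&a.node.get_our_node_id());
+		(get_event_msg!(a, MessageSendEvent::SendChannelReestablish, b.node.get_our_node_id()),
+		 get_event_msg!(b, MessageSendEvent::SendChannelReestablish, a.node.get_our_node_id()))
+	}
+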
 	#[test]
 	fn test_invalid_channel_announcement() {
 		// Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs