From: Matt Corallo
Date: Thu, 17 Nov 2022 05:48:21 +0000 (+0000)
Subject: Wait to free the holding cell during channel_reestablish handling
X-Git-Tag: v0.0.113~31^2~2
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=7e9b88a5cd008c34221bc3eb5084511bd894cc3d;p=rust-lightning

Wait to free the holding cell during channel_reestablish handling

When we process a `channel_reestablish` message we free the HTLC update
holding cell, as things may have changed while we were disconnected.
However, some time ago, to handle freeing from the holding cell when a
monitor update completes, we added a holding cell freeing check in
`get_and_clear_pending_msg_events`. This makes the holding cell free in
`channel_reestablish` handling redundant, as doing it immediately or in
`get_and_clear_pending_msg_events` makes no user-visible difference.
Thus, we remove the redundant code here, substantially simplifying
`handle_chan_restoration_locked` while we're at it.
---

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index f16041d2a..e78b8999f 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -1128,6 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
 	assert_eq!(
 		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
 			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
+	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
 	check_added_monitors!(nodes[1], 1);
 
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5d0c3dd4d..d49221cd0 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -439,8 +439,6 @@ pub(super) struct ReestablishResponses {
 	pub raa: Option<msgs::RevokeAndACK>,
 	pub commitment_update: Option<msgs::CommitmentUpdate>,
 	pub order: RAACommitmentOrder,
-	pub mon_update: Option<ChannelMonitorUpdate>,
-	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
 	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 	pub shutdown_msg: Option<msgs::Shutdown>,
 }
@@ -3955,9 +3953,8 @@ impl<Signer: Sign> Channel<Signer> {
 			// Short circuit the whole handler as there is nothing we can resend them
 			return Ok(ReestablishResponses {
 				channel_ready: None,
-				raa: None, commitment_update: None, mon_update: None,
+				raa: None, commitment_update: None,
 				order: RAACommitmentOrder::CommitmentFirst,
-				holding_cell_failed_htlcs: Vec::new(),
 				shutdown_msg, announcement_sigs,
 			});
 		}
@@ -3970,9 +3967,8 @@ impl<Signer: Sign> Channel<Signer> {
 				next_per_commitment_point,
 				short_channel_id_alias: Some(self.outbound_scid_alias),
 			}),
-			raa: None, commitment_update: None, mon_update: None,
+			raa: None, commitment_update: None,
 			order: RAACommitmentOrder::CommitmentFirst,
-			holding_cell_failed_htlcs: Vec::new(),
 			shutdown_msg, announcement_sigs,
 		});
 	}
@@ -4015,46 +4011,12 @@ impl<Signer: Sign> Channel<Signer> {
 			log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
 		}
 
-		if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-			// We're up-to-date and not waiting on a remote revoke (if we are our
-			// channel_reestablish should result in them sending a revoke_and_ack), but we may
-			// have received some updates while we were disconnected. Free the holding cell
-			// now!
-			match self.free_holding_cell_htlcs(logger) {
-				Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-				Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
-					panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-				Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
-					Ok(ReestablishResponses {
-						channel_ready, shutdown_msg, announcement_sigs,
-						raa: required_revoke,
-						commitment_update: Some(commitment_update),
-						order: self.resend_order.clone(),
-						mon_update: Some(monitor_update),
-						holding_cell_failed_htlcs,
-					})
-				},
-				Ok((None, holding_cell_failed_htlcs)) => {
-					Ok(ReestablishResponses {
-						channel_ready, shutdown_msg, announcement_sigs,
-						raa: required_revoke,
-						commitment_update: None,
-						order: self.resend_order.clone(),
-						mon_update: None,
-						holding_cell_failed_htlcs,
-					})
-				},
-			}
-		} else {
-			Ok(ReestablishResponses {
-				channel_ready, shutdown_msg, announcement_sigs,
-				raa: required_revoke,
-				commitment_update: None,
-				order: self.resend_order.clone(),
-				mon_update: None,
-				holding_cell_failed_htlcs: Vec::new(),
-			})
-		}
+		Ok(ReestablishResponses {
+			channel_ready, shutdown_msg, announcement_sigs,
+			raa: required_revoke,
+			commitment_update: None,
+			order: self.resend_order.clone(),
+		})
 	} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
 		if required_revoke.is_some() {
 			log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
@@ -4066,9 +4028,8 @@ impl<Signer: Sign> Channel<Signer> {
 			self.monitor_pending_commitment_signed = true;
 			Ok(ReestablishResponses {
 				channel_ready, shutdown_msg, announcement_sigs,
-				commitment_update: None, raa: None, mon_update: None,
+				commitment_update: None, raa: None,
 				order: self.resend_order.clone(),
-				holding_cell_failed_htlcs: Vec::new(),
 			})
 		} else {
 			Ok(ReestablishResponses {
@@ -4076,8 +4037,6 @@ impl<Signer: Sign> Channel<Signer> {
 				raa: required_revoke,
 				commitment_update: Some(self.get_last_commitment_update(logger)),
 				order: self.resend_order.clone(),
-				mon_update: None,
-				holding_cell_failed_htlcs: Vec::new(),
 			})
 		}
 	} else {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 070032772..abe1cb765 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1518,13 +1518,11 @@ macro_rules! emit_channel_ready_event {
 }
 
 macro_rules! handle_chan_restoration_locked {
-	($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
-	 $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+	($self: ident, $channel_state: expr, $channel_entry: expr,
+	 $raa: expr, $commitment_update: expr, $order: expr,
 	 $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
 		let mut htlc_forwards = None;
 
-		let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
-		let chanmon_update_is_none = chanmon_update.is_none();
 		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
 		let res = loop {
 			let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
@@ -1533,24 +1531,7 @@ macro_rules! handle_chan_restoration_locked {
 					$channel_entry.get().get_funding_txo().unwrap(), forwards));
 			}
 
-			if chanmon_update.is_some() {
-				// On reconnect, we, by definition, only resend a channel_ready if there have been
-				// no commitment updates, so the only channel monitor update which could also be
-				// associated with a channel_ready would be the funding_created/funding_signed
-				// monitor update. That monitor update failing implies that we won't send
-				// channel_ready until it's been updated, so we can't have a channel_ready and a
-				// monitor update here (so we don't bother to handle it correctly below).
-				assert!($channel_ready.is_none());
-				// A channel monitor update makes no sense without either a channel_ready or a
-				// commitment update to process after it. Since we can't have a channel_ready, we
-				// only bother to handle the monitor-update + commitment_update case below.
-				assert!($commitment_update.is_some());
-			}
-
 			if let Some(msg) = $channel_ready {
-				// Similar to the above, this implies that we're letting the channel_ready fly
-				// before it should be allowed to.
-				assert!(chanmon_update.is_none());
 				send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
 			}
 			if let Some(msg) = $announcement_sigs {
@@ -1562,33 +1543,6 @@ macro_rules! handle_chan_restoration_locked {
 
 			emit_channel_ready_event!($self, $channel_entry.get_mut());
 
-			let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
-			if let Some(monitor_update) = chanmon_update {
-				// We only ever broadcast a funding transaction in response to a funding_signed
-				// message and the resulting monitor update. Thus, on channel_reestablish
-				// message handling we can't have a funding transaction to broadcast. When
-				// processing a monitor update finishing resulting in a funding broadcast, we
-				// cannot have a second monitor update, thus this case would indicate a bug.
-				assert!(funding_broadcastable.is_none());
-				// Given we were just reconnected or finished updating a channel monitor, the
-				// only case where we can get a new ChannelMonitorUpdate would be if we also
-				// have some commitment updates to send as well.
-				assert!($commitment_update.is_some());
-				match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-					ChannelMonitorUpdateStatus::Completed => {},
-					e => {
-						// channel_reestablish doesn't guarantee the order it returns is sensical
-						// for the messages it returns, but if we're setting what messages to
-						// re-transmit on monitor update success, we need to make sure it is sane.
-						let mut order = $order;
-						if $raa.is_none() {
-							order = RAACommitmentOrder::CommitmentFirst;
-						}
-						break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
-					}
-				}
-			}
-
 			macro_rules! handle_cs { () => {
 				if let Some(update) = $commitment_update {
 					$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -1615,6 +1569,8 @@ macro_rules! handle_chan_restoration_locked {
 					handle_cs!();
 				},
 			}
+
+			let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
 			if let Some(tx) = funding_broadcastable {
 				log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
 				$self.tx_broadcaster.broadcast_transaction(&tx);
@@ -1622,13 +1578,6 @@ macro_rules! handle_chan_restoration_locked {
 			break Ok(());
 		};
 
-		if chanmon_update_is_none {
-			// If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
-			// above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
-			// should *never* end up calling back to `chain_monitor.update_channel()`.
-			assert!(res.is_ok());
-		}
-
 		(htlc_forwards, res, counterparty_node_id)
 	} }
 }
@@ -4520,7 +4469,7 @@ impl ChannelManager
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 		let chan_restoration_res;
-		let (htlcs_failed_forward, need_lnd_workaround) = {
+		let need_lnd_workaround = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
@@ -5314,18 +5263,17 @@ impl ChannelManager
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 				}
 			};
 		post_handle_chan_restoration!(self, chan_restoration_res);
-		self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
 
 		if let Some(channel_ready_msg) = need_lnd_workaround {
 			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 677fdccfd..6b247b28f 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -2413,6 +2413,14 @@ macro_rules! handle_chan_reestablish_msgs {
 			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 		}
 
+		let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
+		if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+			idx += 1;
+			assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+			had_channel_update = true;
+		}
+
 		let mut revoke_and_ack = None;
 		let mut commitment_update = None;
 		let order = if let Some(ev) = msg_events.get(idx) {
@@ -2457,6 +2465,7 @@ macro_rules! handle_chan_reestablish_msgs {
 			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 			idx += 1;
 			assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+			assert!(!had_channel_update);
 		}
 
 		assert_eq!(msg_events.len(), idx);
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index dc3d53922..1056ceebd 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -786,9 +786,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
 	check_added_monitors!(nodes[3], 1);
 	assert_eq!(ds_msgs.len(), 2);
-	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
-	let cs_updates = match ds_msgs[0] {
+	let cs_updates = match ds_msgs[1] {
 		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
 			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 			check_added_monitors!(nodes[2], 1);
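
The removal is safe because every flow that processes a `channel_reestablish` drains its pending message events shortly afterwards, and that drain is where the holding cell now gets freed; the test change in chanmon_update_fail_tests.rs above makes exactly that drain explicit ("// Free the holding cell"). As a rough illustration of the deferred-free pattern this relies on, here is a minimal, self-contained sketch; the types, fields, and method names below (Channel, MessageSendEvent, awaiting_remote_revoke, and so on) are hypothetical stand-ins, not the real rust-lightning API:

    // Hypothetical, heavily simplified sketch of freeing a holding cell
    // lazily from the message-event drain instead of during reestablish.

    #[derive(Debug)]
    enum MessageSendEvent {
        UpdateHTLCs { updates: Vec<u64> },
    }

    #[derive(Default)]
    struct Channel {
        holding_cell: Vec<u64>,           // HTLC updates queued while blocked
        awaiting_remote_revoke: bool,     // stand-in for AwaitingRemoteRevoke
        monitor_update_in_progress: bool, // stand-in for MonitorUpdateInProgress
    }

    impl Channel {
        // After this commit, reestablish handling only rebuilds the
        // retransmission state; it no longer frees the holding cell itself.
        fn handle_channel_reestablish(&mut self) {
            self.awaiting_remote_revoke = false;
        }

        // The holding cell is freed lazily here instead. Since callers drain
        // events right after reestablish handling, the peer sees the queued
        // updates at effectively the same time as before.
        fn get_and_clear_pending_msg_events(&mut self) -> Vec<MessageSendEvent> {
            let mut events = Vec::new();
            if !self.awaiting_remote_revoke && !self.monitor_update_in_progress
                && !self.holding_cell.is_empty()
            {
                let updates = std::mem::take(&mut self.holding_cell);
                events.push(MessageSendEvent::UpdateHTLCs { updates });
            }
            events
        }
    }

    fn main() {
        let mut chan = Channel::default();
        chan.holding_cell.push(1); // an update queued while disconnected
        chan.awaiting_remote_revoke = true;

        chan.handle_channel_reestablish();
        // The queued update only goes out on the next event drain:
        for ev in chan.get_and_clear_pending_msg_events() {
            println!("{:?}", ev);
        }
    }

Note how the ordering change also explains the test updates: a SendChannelUpdate may now be emitted before the freed UpdateHTLCs in the same drain, which is why reload_tests.rs swaps ds_msgs[0] and ds_msgs[1] and why handle_chan_reestablish_msgs! accepts the ChannelUpdate in either position, but never both.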