Merge pull request #988 from TheBlueMatt/2021-07-chan-details-usability
author    Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Thu, 8 Jul 2021 17:25:53 +0000 (17:25 +0000)
committer GitHub <noreply@github.com>
Thu, 8 Jul 2021 17:25:53 +0000 (17:25 +0000)
Improve ChannelDetails readability significantly.

fuzz/src/chanmon_consistency.rs
lightning-background-processor/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/routing/network_graph.rs
lightning/src/util/events.rs

fuzz/src/chanmon_consistency.rs
index 38e12b8496bcc1c5ac0d6fe269acc38aeb8959a2..4aa641d1fa4144275764a9b205ca87ea2b8921c5 100644 (file)
@@ -571,7 +571,12 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                events::MessageSendEvent::SendFundingLocked { .. } => continue,
                                                events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
                                                events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => continue,
-                                               _ => panic!("Unhandled message event"),
+                                               events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+                                                       assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+                                                       if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
+                                                       *node_id == a_id
+                                               },
+                                               _ => panic!("Unhandled message event {:?}", event),
                                        };
                                        if push_a { ba_events.push(event); } else { bc_events.push(event); }
                                }
@@ -692,7 +697,16 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                        // Can be generated due to a payment forward being rejected due to a
                                                        // channel having previously failed a monitor update
                                                },
-                                               _ => panic!("Unhandled message event"),
+                                               events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+                                                       // When we reconnect we will resend a channel_update to make sure our
+                                                       // counterparty has the latest parameters for receiving payments
+                                                       // through us. We do, however, check that the message does not include
+                                                       // the "disabled" bit, as we should never ever have a channel which is
+                                                       // disabled when we send such an update (or it may indicate channel
+                                                       // force-close which we should detect as an error).
+                                                       assert_eq!(msg.contents.flags & 2, 0);
+                                               },
+                                               _ => panic!("Unhandled message event {:?}", event),
                                        }
                                        if $limit_events != ProcessMessages::AllMessages {
                                                break;
@@ -722,6 +736,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                        events::MessageSendEvent::SendFundingLocked { .. } => {},
                                                        events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
                                                        events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+                                                       events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+                                                               assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+                                                       },
                                                        _ => panic!("Unhandled message event"),
                                                }
                                        }
@@ -737,6 +754,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                        events::MessageSendEvent::SendFundingLocked { .. } => {},
                                                        events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
                                                        events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+                                                       events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+                                                               assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+                                                       },
                                                        _ => panic!("Unhandled message event"),
                                                }
                                        }
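
For context on the flags & 2 assertions added above (and the flags & 1 direction check used later in this change): in a BOLT 7 channel_update, bit 0 of flags encodes the direction (clear when the update was produced by node_1, the node with the lexicographically smaller pubkey) and bit 1 is the disable bit. A tiny sketch of the two checks, with hypothetical helper names (these are not LDK APIs; flags here is msg.contents.flags):

    // Illustrative helpers only.
    fn is_disabled(flags: u8) -> bool {
        flags & 0b10 != 0 // bit 1: channel flagged as disabled
    }
    fn is_from_node_one(flags: u8) -> bool {
        flags & 0b01 == 0 // bit 0 clear: update signed by node_1 (smaller pubkey)
    }
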
lightning-background-processor/src/lib.rs
index 197eff45ab5c101d3d29a41b5b1fdf8bc21aba54..3a36bb5625eab033b05c5aec3cfa183eb2d67a7d 100644 (file)
@@ -442,9 +442,13 @@ mod tests {
 
                // Confirm the funding transaction.
                confirm_transaction(&mut nodes[0], &funding_tx);
+               let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
                confirm_transaction(&mut nodes[1], &funding_tx);
-               nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
-               nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+               let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
+               let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
+               let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 
                assert!(bg_processor.stop().is_ok());
 
lightning/src/ln/chanmon_update_fail_tests.rs
index dd7b1906ca079aced33d3a62384fc1772d02b49c..5f90b030dfb12ac78a0a089b9acf0a40e43f6166 100644 (file)
@@ -1158,7 +1158,10 @@ fn test_monitor_update_fail_reestablish() {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       assert_eq!(
+               get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+                       .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
+
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
@@ -1172,10 +1175,15 @@ fn test_monitor_update_fail_reestablish() {
        assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+       assert_eq!(
+               get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+                       .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
        check_added_monitors!(nodes[1], 0);
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       assert_eq!(
+               get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
+                       .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1352,14 +1360,14 @@ fn claim_while_disconnected_monitor_update_fail() {
        let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
-       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
        // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
        // update.
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1492,7 +1500,9 @@ fn monitor_failed_no_reestablish_response() {
        let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
+       let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
+       let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
lightning/src/ln/channelmanager.rs
index e38b232e92d7089ec08c6184a130e052fe7c2c1c..2f51fbab6bb735c574d6dcaf6d246baebeb6de6f 100644 (file)
@@ -808,7 +808,7 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                        ChannelError::CloseDelayBroadcast(msg) => {
                                log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
@@ -816,7 +816,7 @@ macro_rules! convert_chan_err {
                                        $short_to_id.remove(&short_id);
                                }
                                let shutdown_res = $channel.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        }
                }
        }
@@ -872,7 +872,8 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+                                               $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -1260,9 +1261,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                let chan_update = if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
-                               Some(update)
-                       } else { None }
+                       self.get_channel_update_for_broadcast(&chan).ok()
                } else { None };
 
                if let Some(update) = chan_update {
@@ -1311,7 +1310,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
                self.finish_force_close_channel(chan.force_shutdown(true));
-               if let Ok(update) = self.get_channel_update(&chan) {
+               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                msg: update
@@ -1571,31 +1570,31 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // hopefully an attacker trying to path-trace payments cannot make this occur
                                        // on a small/per-node/per-channel scale.
                                        if !chan.is_live() { // channel_disabled
-                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
-                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
-                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
                                        // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
                                        // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
-                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
                                        if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
                                                break Some(("CLTV expiry is too far in the future", 21, None));
                                        }
-                                       // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-                                       // But, to be safe against policy reception, we use a longuer delay.
+                                       // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
+                                       // But, to be safe against policy reception, we use a longer delay.
                                        if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
-                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+                                               break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
                                        }
 
                                        break None;
@@ -1623,9 +1622,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                (pending_forward_info, channel_state.unwrap())
        }
 
-       /// only fails if the channel does not yet have an assigned short_id
+       /// Gets the current channel_update for the given channel. This first checks if the channel is
+       /// public, and thus should be called whenever the result is going to be passed out in a
+       /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
+       ///
+       /// May be called with channel_state already locked!
+       fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               if !chan.should_announce() {
+                       return Err(LightningError {
+                               err: "Cannot broadcast a channel_update for a private channel".to_owned(),
+                               action: msgs::ErrorAction::IgnoreError
+                       });
+               }
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+               self.get_channel_update_for_unicast(chan)
+       }
+
+       /// Gets the current channel_update for the given channel. This does not check if the channel
+       /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+       /// and thus MUST NOT be called unless the recipient of the resulting message has already
+       /// provided evidence that they know about the existence of the channel.
        /// May be called with channel_state already locked!
-       fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+       fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
                let short_channel_id = match chan.get_short_channel_id() {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -2018,7 +2037,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg,
-                                       update_msg: match self.get_channel_update(chan) {
+                                       update_msg: match self.get_channel_update_for_broadcast(chan) {
                                                Ok(msg) => msg,
                                                Err(_) => continue,
                                        },
@@ -2110,7 +2129,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
-                                                                                       let chan_update = self.get_channel_update(chan.get()).unwrap();
+                                                                                       let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
                                                                                        ));
@@ -2182,7 +2201,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
+                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
@@ -2385,7 +2404,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
                                        ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
                                        ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -2394,7 +2413,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                        },
                                        ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -2444,7 +2463,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
-                                                               if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+                                                               if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
                                                                        (0x1000|7, upd.encode_with_len())
                                                                } else {
                                                                        (0x4000|10, Vec::new())
@@ -2808,7 +2827,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let (mut pending_failures, chan_restoration_res) = {
+               let chan_restoration_res;
+               let mut pending_failures = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -2820,7 +2840,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
-                       (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+                       let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+                               // We only send a channel_update in the case where we are just now sending a
+                               // funding_locked and the channel is in a usable state. Further, we rely on the
+                               // normal announcement_signatures process to send a channel_update for public
+                               // channels, only generating a unicast channel_update if this is a private channel.
+                               Some(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: channel.get().get_counterparty_node_id(),
+                                       msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+                               })
+                       } else { None };
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+                       if let Some(upd) = channel_update {
+                               channel_state.pending_msg_events.push(upd);
+                       }
+                       pending_failures
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                for failure in pending_failures.drain(..) {
@@ -2983,6 +3017,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
                                        });
+                               } else if chan.get().is_usable() {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: counterparty_node_id.clone(),
+                                               msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                       });
                                }
                                Ok(())
                        },
@@ -3027,7 +3066,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
@@ -3073,7 +3112,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
                }
                if let Some(chan) = chan_option {
-                       if let Ok(update) = self.get_channel_update(&chan) {
+                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut channel_state = self.channel_state.lock().unwrap();
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
@@ -3111,7 +3150,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if let Ok(upd) = self.get_channel_update(chan) {
+                                                       let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
                                                                        let mut res = Vec::with_capacity(8 + 128);
                                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
@@ -3373,7 +3412,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                        msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
-                                       update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
+                                       // Note that announcement_signatures fails if the channel cannot be announced,
+                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                       update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
                                });
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -3403,7 +3444,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
-                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let msg_from_node_one = msg.contents.flags & 1 == 0;
+                               if were_node_one == msg_from_node_one {
+                                       return Ok(NotifyOption::SkipPersist);
+                               } else {
+                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
@@ -3411,7 +3458,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-               let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
+               let chan_restoration_res;
+               let (htlcs_failed_forward, need_lnd_workaround) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
 
@@ -3426,15 +3474,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                        let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
                                                try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+                                       let mut channel_update = None;
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
                                                        msg,
                                                });
+                                       } else if chan.get().is_usable() {
+                                               // If the channel is in a usable state (ie the channel is not being shut
+                                               // down), send a unicast channel_update to our counterparty to make sure
+                                               // they have the latest channel parameters.
+                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: chan.get().get_counterparty_node_id(),
+                                                       msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+                                               });
                                        }
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
-                                       (htlcs_failed_forward, need_lnd_workaround,
-                                               handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+                                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
+                                       if let Some(upd) = channel_update {
+                                               channel_state.pending_msg_events.push(upd);
+                                       }
+                                       (htlcs_failed_forward, need_lnd_workaround)
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
@@ -3531,7 +3591,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(false));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -3970,7 +4030,7 @@ where
                                let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
+                                               let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
                                                timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
                                                        failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
                                                        data: chan_update,
@@ -3987,6 +4047,12 @@ where
                                                                node_id: channel.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
+                                               } else if channel.is_usable() {
+                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: channel.get_counterparty_node_id(),
+                                                               msg: self.get_channel_update_for_unicast(channel).unwrap(),
+                                                       });
                                                } else {
                                                        log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
                                                }
@@ -3999,7 +4065,7 @@ where
                                        // It looks like our counterparty went on-chain or funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
-                                       if let Ok(update) = self.get_channel_update(&channel) {
+                                       if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
@@ -4189,7 +4255,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        short_to_id.remove(&short_id);
                                                }
                                                failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update(&chan) {
+                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
                                                        });
@@ -4232,6 +4298,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                       &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -4965,6 +5032,31 @@ mod tests {
                // At this point the channel info given by peers should still be the same.
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // An earlier version of handle_channel_update didn't check the directionality of the
+               // update message and would always update the local fee info, even if our peer was
+               // (spuriously) forwarding us our own channel_update.
+               let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
+               let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+               let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+
+               // First deliver each peers' own message, checking that the node doesn't need to be
+               // persisted and that its channel info remains the same.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+               // Finally, deliver the other peers' message, ensuring each node needs to be persisted and
+               // the channel info has updated.
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
        }
 }
 
@@ -5065,7 +5157,19 @@ pub mod bench {
                Listen::block_connected(&node_b, &block, 1);
 
                node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
-               node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+               let msg_events = node_a.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 2);
+               match msg_events[0] {
+                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                               get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+                       },
+                       _ => panic!(),
+               }
+               match msg_events[1] {
+                       MessageSendEvent::SendChannelUpdate { .. } => {},
+                       _ => panic!(),
+               }
 
                let dummy_graph = NetworkGraph::new(genesis_hash);
 
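The get_channel_update_for_broadcast / get_channel_update_for_unicast split above reduces to a gate on whether the channel is announced. A hedged sketch of the decision as it plays out across the call sites in this file (ChannelManager context, with method and event names as in the diff; an illustration, not verbatim LDK code):

    // Public channels: gossip the update to everyone. Private channels: only ever
    // hand the update to the counterparty, who already knows the channel exists.
    let update_event = if chan.should_announce() {
        self.get_channel_update_for_broadcast(&chan).ok()
            .map(|msg| events::MessageSendEvent::BroadcastChannelUpdate { msg })
    } else if chan.is_usable() {
        self.get_channel_update_for_unicast(&chan).ok()
            .map(|msg| events::MessageSendEvent::SendChannelUpdate {
                node_id: chan.get_counterparty_node_id(),
                msg,
            })
    } else { None };
    if let Some(upd) = update_event {
        channel_state.pending_msg_events.push(upd);
    }
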
lightning/src/ln/functional_test_utils.rs
index c9dcb8517c323569d975c3bb9240c6c7eb7eae6d..2e9439916df7b8c64760813d94685621183dba01 100644 (file)
@@ -1583,18 +1583,20 @@ macro_rules! handle_chan_reestablish_msgs {
                        let mut revoke_and_ack = None;
                        let mut commitment_update = None;
                        let order = if let Some(ev) = msg_events.get(idx) {
-                               idx += 1;
                                match ev {
                                        &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                revoke_and_ack = Some(msg.clone());
+                                               idx += 1;
                                                RAACommitmentOrder::RevokeAndACKFirst
                                        },
                                        &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                commitment_update = Some(updates.clone());
+                                               idx += 1;
                                                RAACommitmentOrder::CommitmentFirst
                                        },
+                                       &MessageSendEvent::SendChannelUpdate { .. } => RAACommitmentOrder::CommitmentFirst,
                                        _ => panic!("Unexpected event"),
                                }
                        } else {
@@ -1607,16 +1609,24 @@ macro_rules! handle_chan_reestablish_msgs {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                assert!(revoke_and_ack.is_none());
                                                revoke_and_ack = Some(msg.clone());
+                                               idx += 1;
                                        },
                                        &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
                                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                                assert!(commitment_update.is_none());
                                                commitment_update = Some(updates.clone());
+                                               idx += 1;
                                        },
+                                       &MessageSendEvent::SendChannelUpdate { .. } => {},
                                        _ => panic!("Unexpected event"),
                                }
                        }
 
+                       if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+                               assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+                               assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+                       }
+
                        (funding_locked, revoke_and_ack, commitment_update, order)
                }
        }
lightning/src/ln/functional_tests.rs
index 4e51f6b4bf5fe9dcdd08d393e31461b05aa5ff9b..073f3578de60b4419ed196d4b39ae5758942d0fe 100644 (file)
@@ -1039,7 +1039,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
                node_0_2nd_shutdown
        } else {
-               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+               assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
                get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
        };
lightning/src/ln/peer_handler.rs
index 9e395f1107b1d62bfc26da2560b78e389987938c..96ec31c98ee1b4aa289f2ba78ee32c76a4e5f22d 100644 (file)
@@ -1264,6 +1264,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                                                        self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None);
                                                }
                                        },
+                                       MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+                                               log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+                                                               log_pubkey!(node_id), msg.contents.short_channel_id);
+                                               let peer = get_peer_for_forwarding!(node_id);
+                                               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+                                       },
                                        MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
                                                self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
                                        },
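
PeerManager now delivers the new event to exactly one peer, in contrast to BroadcastChannelUpdate which fans out through forward_broadcast_msg. If you consume MessageSendEvents yourself rather than through PeerManager, the same distinction applies; a minimal sketch, assuming hypothetical send_to_peer / broadcast_to_all_peers transport helpers (not LDK APIs):

    for event in channel_manager.get_and_clear_pending_msg_events() {
        match event {
            MessageSendEvent::SendChannelUpdate { node_id, msg } => {
                // Private-channel update: deliver only to the counterparty itself.
                send_to_peer(&node_id, &msg);
            },
            MessageSendEvent::BroadcastChannelUpdate { msg } => {
                // Public-channel update: gossip to every connected peer.
                broadcast_to_all_peers(&msg);
            },
            _ => { /* other events elided for brevity */ },
        }
    }
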
lightning/src/routing/network_graph.rs
index 80a7010bd18dd50de0a9fa0854ba74f99f61f707..22cd2a92f6f6fba841de9f1cc01d86bf401552fb 100644 (file)
@@ -377,23 +377,39 @@ impl<C: Deref , L: Deref > RoutingMessageHandler for NetGraphMsgHandler<C, L> wh
 
                let mut pending_events = self.pending_events.lock().unwrap();
                let batch_count = batches.len();
+               let mut prev_batch_endblock = msg.first_blocknum;
                for (batch_index, batch) in batches.into_iter().enumerate() {
-                       // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and subsequent
-                       // must be >= the prior reply. We'll simplify this by using zero since its still spec compliant and
-                       // sequence completion is now explicitly.
-                       let first_blocknum = 0;
-
-                       // Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
-                       // query's value. Prior batches must use the number of blocks that fit into the message. We'll
-                       // base this off the last SCID in the batch since we've somewhat abusing first_blocknum.
-                       let number_of_blocks = if batch_index == batch_count-1 {
-                               msg.end_blocknum()
-                       } else {
-                               block_from_scid(batch.last().unwrap()) + 1
+                       // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
+                       // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
+                       //
+                       // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
+                       // reply is >= the previous reply's `first_blocknum` and is either exactly the previous
+                       // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
+                       // significant divergence from the requirements set by the spec, and, in the case of
+                       // blocks with no channel opens (e.g. empty blocks), requires that we use the previous
+                       // value and *not* derive the first_blocknum from the actual first block of the reply.
+                       let first_blocknum = prev_batch_endblock;
+
+                       // Each message carries the number of blocks (from the `first_blocknum`) its contents
+                       // fit in. There is no requirement that we use exactly the number of blocks its
+                       // contents span, aside from the bogus c-lightning requirements described above.
+                       //
+                       // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
+                       // >= the query's end block. Thus, for the last reply, we calculate the difference
+                       // between the query's end block and the start of the reply.
+                       //
+                       // Overflow safe since end_blocknum = msg.first_blocknum + msg.number_of_blocks and
+                       // first_blocknum will be either msg.first_blocknum or a higher block height.
+                       let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+                               (true, msg.end_blocknum() - first_blocknum)
+                       }
+                       // Prior replies should use the number of blocks that fit into the reply. Overflow
+                       // safe since first_blocknum is always <= last SCID's block.
+                       else {
+                               (false, block_from_scid(batch.last().unwrap()) - first_blocknum)
                        };
 
-                       // Only true for the last message in a sequence
-                       let sync_complete = batch_index == batch_count - 1;
+                       prev_batch_endblock = first_blocknum + number_of_blocks;
 
                        pending_events.push(MessageSendEvent::SendReplyChannelRange {
                                node_id: their_node_id.clone(),
@@ -2239,8 +2255,8 @@ mod tests {
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 0x01000000,
+                                       first_blocknum: 0xffffff,
+                                       number_of_blocks: 1,
                                        sync_complete: true,
                                        short_channel_ids: vec![]
                                },
@@ -2260,8 +2276,8 @@ mod tests {
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 2000,
+                                       first_blocknum: 1000,
+                                       number_of_blocks: 1000,
                                        sync_complete: true,
                                        short_channel_ids: vec![],
                                }
@@ -2281,8 +2297,8 @@ mod tests {
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 0xffffffff,
+                                       first_blocknum: 0xfe0000,
+                                       number_of_blocks: 0xffffffff - 0xfe0000,
                                        sync_complete: true,
                                        short_channel_ids: vec![
                                                0xfffffe_ffffff_ffff, // max
@@ -2304,8 +2320,8 @@ mod tests {
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 108000,
+                                       first_blocknum: 100000,
+                                       number_of_blocks: 8000,
                                        sync_complete: true,
                                        short_channel_ids: (100000..=107999)
                                                .map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2327,8 +2343,8 @@ mod tests {
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 108000,
+                                       first_blocknum: 100000,
+                                       number_of_blocks: 7999,
                                        sync_complete: false,
                                        short_channel_ids: (100000..=107999)
                                                .map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2336,8 +2352,8 @@ mod tests {
                                },
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 108001,
+                                       first_blocknum: 107999,
+                                       number_of_blocks: 2,
                                        sync_complete: true,
                                        short_channel_ids: vec![
                                                scid_from_parts(108000, 0, 0).unwrap(),
@@ -2359,8 +2375,8 @@ mod tests {
                        vec![
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 108002,
+                                       first_blocknum: 100002,
+                                       number_of_blocks: 7999,
                                        sync_complete: false,
                                        short_channel_ids: (100002..=108001)
                                                .map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2368,8 +2384,8 @@ mod tests {
                                },
                                ReplyChannelRange {
                                        chain_hash: chain_hash.clone(),
-                                       first_blocknum: 0,
-                                       number_of_blocks: 108002,
+                                       first_blocknum: 108001,
+                                       number_of_blocks: 1,
                                        sync_complete: true,
                                        short_channel_ids: vec![
                                                scid_from_parts(108001, 1, 0).unwrap(),
@@ -2386,6 +2402,9 @@ mod tests {
                expected_ok: bool,
                expected_replies: Vec<ReplyChannelRange>
        ) {
+               let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+               let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+               let query_end_blocknum = msg.end_blocknum();
                let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
 
                if expected_ok {
@@ -2407,6 +2426,17 @@ mod tests {
                                        assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
                                        assert_eq!(msg.sync_complete, expected_reply.sync_complete);
                                        assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+                                       // Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+                                       assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+                                       assert!(msg.first_blocknum >= max_firstblocknum);
+                                       max_firstblocknum = msg.first_blocknum;
+                                       c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+                                       // Check that the last reply's end block is >= the query's end_blocknum
+                                       if i == events.len() - 1 {
+                                               assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+                                       }
                                },
                                _ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
                        }
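
The assertions above re-check, on our own replies, the c-lightning v0.9.x sequencing constraint. A hedged standalone sketch of the same rule as a hypothetical validator (not part of the test suite):

// Hypothetical helper: validates the sequencing constraint the assertions above enforce.
fn check_sequencing(query_first: u32, query_end: u32, replies: &[(u32, u32)]) -> bool {
    let mut max_firstblocknum = query_first.saturating_sub(1);
    let mut prev_end_blocknum = max_firstblocknum;
    for (i, &(first_blocknum, number_of_blocks)) in replies.iter().enumerate() {
        // Each first_blocknum must equal the previous reply's end block or be one past it...
        if first_blocknum != prev_end_blocknum && first_blocknum != prev_end_blocknum.saturating_add(1) {
            return false;
        }
        // ...and must never move backwards.
        if first_blocknum < max_firstblocknum {
            return false;
        }
        max_firstblocknum = first_blocknum;
        prev_end_blocknum = first_blocknum.saturating_add(number_of_blocks);
        // The final reply must cover at least through the query's end block.
        if i == replies.len() - 1 && prev_end_blocknum < query_end {
            return false;
        }
    }
    true
}

fn main() {
    // Matches the two-reply shape used in the test vectors above.
    assert!(check_sequencing(100000, 108001, &[(100000, 7999), (107999, 2)]));
    // A sequence that restarts at zero violates the constraint.
    assert!(!check_sequencing(100000, 108001, &[(0, 7999), (0, 108001)]));
}
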
index dbb4178fb50e4643db6e2332d0e257bab18f8e29..0fc7c6b3a3d166096019407784b929588ddbddd8 100644 (file)
@@ -389,6 +389,15 @@ pub enum MessageSendEvent {
                /// The channel_update which should be sent.
                msg: msgs::ChannelUpdate,
        },
+       /// Used to indicate that a channel_update should be sent to a single peer.
+       /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a
+       /// private channel and we shouldn't be informing all of our peers of channel parameters.
+       SendChannelUpdate {
+               /// The node_id of the node which should receive this message
+               node_id: PublicKey,
+               /// The channel_update which should be sent.
+               msg: msgs::ChannelUpdate,
+       },
        /// Broadcast an error downstream to be handled
        HandleError {
                /// The node_id of the node which should receive this message