node_id: counterparty_node_id,
msg: announcement_sigs,
});
+ } else if $channel_entry.get().is_usable() {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: counterparty_node_id,
+ msg: $self.get_channel_update_for_unicast($channel_entry.get()).unwrap(),
+ });
}
$channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
}
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
});
+ } else if chan.get().is_usable() {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: counterparty_node_id.clone(),
+ msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+ });
}
Ok(())
},
node_id: channel.get_counterparty_node_id(),
msg: announcement_sigs,
});
+ } else if channel.is_usable() {
+ log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with channel_update for {}", log_bytes!(channel.channel_id()));
+ pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(channel).unwrap(),
+ });
} else {
log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
}
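
All three ChannelManager sites above gate the new event the same way: if announcement_signatures can be produced they take priority, otherwise a usable (private but operational) channel gets a unicast channel_update instead. A minimal sketch of that decision, using hypothetical simplified types rather than LDK's real Channel API:

```rust
// Hypothetical simplified model of the branch repeated at each emission site above;
// the field and type names are illustrative, not LDK's actual Channel API.
struct ChannelView { can_announce: bool, is_usable: bool }

enum GossipAction {
    SendAnnouncementSignatures, // public channel: begin the announcement handshake
    SendChannelUpdate,          // private but usable: inform only the direct peer
    None,                       // nothing to send beyond funding_locked itself
}

fn gossip_action(chan: &ChannelView) -> GossipAction {
    if chan.can_announce {
        GossipAction::SendAnnouncementSignatures
    } else if chan.is_usable {
        GossipAction::SendChannelUpdate
    } else {
        GossipAction::None
    }
}
```
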
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None);
}
},
+ MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+ log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id), msg.contents.short_channel_id);
+ let (_, peer) = get_peer_for_forwarding!(node_id);
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ },
MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
},
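
The peer_handler change above is the delivery half: a BroadcastChannelUpdate still goes through forward_broadcast_msg to all gossip peers, while the new SendChannelUpdate is encrypted and queued for exactly one peer's outbound buffer. A minimal sketch of that broadcast-vs-unicast dispatch, using hypothetical simplified stand-ins (not LDK's actual PublicKey, msgs::ChannelUpdate, peer map, or encoding types):

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the real types; illustrative only.
type NodeId = [u8; 33];
type EncodedMsg = Vec<u8>;

struct ChannelUpdate { short_channel_id: u64 }

struct Peer { pending_outbound_buffer: Vec<EncodedMsg> }

enum MessageSendEvent {
    // Gossip the update to every connected peer (public channels).
    BroadcastChannelUpdate { msg: ChannelUpdate },
    // Queue the update for exactly one peer (the new variant, for private channels).
    SendChannelUpdate { node_id: NodeId, msg: ChannelUpdate },
}

// Placeholder for wire serialization.
fn encode(msg: &ChannelUpdate) -> EncodedMsg {
    msg.short_channel_id.to_be_bytes().to_vec()
}

fn process_events(events: Vec<MessageSendEvent>, peers: &mut HashMap<NodeId, Peer>) {
    for event in events {
        match event {
            MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                // Broadcast: every connected peer gets a copy.
                for peer in peers.values_mut() {
                    peer.pending_outbound_buffer.push(encode(msg));
                }
            },
            MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
                // Unicast: only the counterparty named in the event gets the update.
                if let Some(peer) = peers.get_mut(node_id) {
                    peer.pending_outbound_buffer.push(encode(msg));
                }
            },
        }
    }
}
```
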
/// The channel_update which should be sent.
msg: msgs::ChannelUpdate,
},
+ /// Used to indicate that a channel_update should be sent to a single peer.
+ /// This is used, in contrast to [`Self::BroadcastChannelUpdate`], when the channel is a
+ /// private channel and we shouldn't be informing all of our peers of channel parameters.
+ SendChannelUpdate {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The channel_update which should be sent.
+ msg: msgs::ChannelUpdate,
+ },
/// Broadcast an error downstream to be handled
HandleError {
/// The node_id of the node which should receive this message