events::MessageSendEvent::SendFundingLocked { .. } => continue,
events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => continue,
- _ => panic!("Unhandled message event"),
+ events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+ if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
+ *node_id == a_id
+ },
+ _ => panic!("Unhandled message event {:?}", event),
};
if push_a { ba_events.push(event); } else { bc_events.push(event); }
}
// Can be generated when a payment forward is rejected because a
// channel previously failed a monitor update
},
- _ => panic!("Unhandled message event"),
+ events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+ // When we reconnect we will resend a channel_update to make sure our
+ // counterparty has the latest parameters for receiving payments
+ // through us. We do, however, check that the message does not include
+ // the "disabled" bit, as we should never ever have a channel which is
+ // disabled when we send such an update (or it may indicate channel
+ // force-close which we should detect as an error).
+ assert_eq!(msg.contents.flags & 2, 0);
+ },
+ _ => panic!("Unhandled message event {:?}", event),
}
if $limit_events != ProcessMessages::AllMessages {
break;
events::MessageSendEvent::SendFundingLocked { .. } => {},
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+ assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+ },
_ => panic!("Unhandled message event"),
}
}
events::MessageSendEvent::SendFundingLocked { .. } => {},
events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
+ assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
+ },
_ => panic!("Unhandled message event"),
}
}
// Confirm the funding transaction.
confirm_transaction(&mut nodes[0], &funding_tx);
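+ // Delivering a funding_locked now also queues a SendChannelUpdate for the counterparty,
+ // so fetch both funding_locked messages first and consume the channel_updates below.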
+ let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
confirm_transaction(&mut nodes[1], &funding_tx);
- nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
- nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+ let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
+ let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
+ let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
assert!(bg_processor.stop().is_ok());
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(
+ get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+ .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
+
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+ assert_eq!(
+ get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
+ .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
check_added_monitors!(nodes[1], 0);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(
+ get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
+ .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
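+ // The reestablish now produces a channel_update for the usable channel rather than an empty queue.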
+ let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
// update.
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
+ let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
+ let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
action: msgs::ErrorAction::IgnoreError
});
}
+ log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
self.get_channel_update_for_unicast(chan)
}
/// provided evidence that they know about the existence of the channel.
/// May be called with channel_state already locked!
fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id() {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let (mut pending_failures, chan_restoration_res) = {
+ let chan_restoration_res;
+ let mut pending_failures = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
}
let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
- (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+ let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+ // We only send a channel_update in the case where we are just now sending a
+ // funding_locked and the channel is in a usable state. Further, we rely on the
+ // normal announcement_signatures process to send a channel_update for public
+ // channels, only generating a unicast channel_update if this is a private channel.
+ Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get().get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+ })
+ } else { None };
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
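+ // Queue the channel_update only after handle_chan_restoration_locked! has pushed the
+ // funding_locked, so our counterparty sees funding_locked before the channel_update.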
+ if let Some(upd) = channel_update {
+ channel_state.pending_msg_events.push(upd);
+ }
+ pending_failures
};
post_handle_chan_restoration!(self, chan_restoration_res);
for failure in pending_failures.drain(..) {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
});
+ } else if chan.get().is_usable() {
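+ // No announcement_signatures to send, but the channel is usable: hand our counterparty
+ // a channel_update directly so they can receive payments routed through us.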
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: counterparty_node_id.clone(),
+ msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+ });
}
Ok(())
},
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = {
+ let chan_restoration_res;
+ let (htlcs_failed_forward, need_lnd_workaround) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+ let mut channel_update = None;
if let Some(msg) = shutdown {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
msg,
});
+ } else if chan.get().is_usable() {
+ // If the channel is in a usable state (i.e., the channel is not being
+ // shut down), send a unicast channel_update to our counterparty to make
+ // sure they have the latest channel parameters.
+ channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: chan.get().get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+ });
}
let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
- (htlcs_failed_forward, need_lnd_workaround,
- handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
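+ // As in channel_monitor_updated, queue the channel_update only after the
+ // restoration macro has pushed any funding_locked it generated.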
+ if let Some(upd) = channel_update {
+ channel_state.pending_msg_events.push(upd);
+ }
+ (htlcs_failed_forward, need_lnd_workaround)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
node_id: channel.get_counterparty_node_id(),
msg: announcement_sigs,
});
+ } else if channel.is_usable() {
+ log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+ pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(channel).unwrap(),
+ });
} else {
log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
}
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
Listen::block_connected(&node_b, &block, 1);
node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
- node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+ let msg_events = node_a.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 2);
+ match msg_events[0] {
+ MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+ node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
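+ // node_b's handling of the funding_locked queues a channel_update for node_a; consume it here.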
+ get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+ },
+ _ => panic!(),
+ }
+ match msg_events[1] {
+ MessageSendEvent::SendChannelUpdate { .. } => {},
+ _ => panic!(),
+ }
let dummy_graph = NetworkGraph::new(genesis_hash);
let mut revoke_and_ack = None;
let mut commitment_update = None;
let order = if let Some(ev) = msg_events.get(idx) {
- idx += 1;
match ev {
&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
assert_eq!(*node_id, $dst_node.node.get_our_node_id());
revoke_and_ack = Some(msg.clone());
+ idx += 1;
RAACommitmentOrder::RevokeAndACKFirst
},
&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
assert_eq!(*node_id, $dst_node.node.get_our_node_id());
commitment_update = Some(updates.clone());
+ idx += 1;
RAACommitmentOrder::CommitmentFirst
},
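+ // A lone channel_update implies no ordering: neither revoke_and_ack nor
+ // commitment_update is set, so the order returned here goes unused.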
+ &MessageSendEvent::SendChannelUpdate { .. } => RAACommitmentOrder::CommitmentFirst,
_ => panic!("Unexpected event"),
}
} else {
assert_eq!(*node_id, $dst_node.node.get_our_node_id());
assert!(revoke_and_ack.is_none());
revoke_and_ack = Some(msg.clone());
+ idx += 1;
},
&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
assert_eq!(*node_id, $dst_node.node.get_our_node_id());
assert!(commitment_update.is_none());
commitment_update = Some(updates.clone());
+ idx += 1;
},
+ &MessageSendEvent::SendChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
}
+ if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+ }
+
(funding_locked, revoke_and_ack, commitment_update, order)
}
}
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
node_0_2nd_shutdown
} else {
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+ assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
};
self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None);
}
},
+ MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+ log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id), msg.contents.short_channel_id);
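+ // Unlike the Broadcast* events, this update is encrypted and queued for a single peer only.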
+ let peer = get_peer_for_forwarding!(node_id);
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ },
MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
},
/// The channel_update which should be sent.
msg: msgs::ChannelUpdate,
},
+ /// Used to indicate that a channel_update should be sent to a single peer.
+ /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is
+ /// private and we should not be informing all of our peers of the channel's parameters.
+ SendChannelUpdate {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The channel_update which should be sent.
+ msg: msgs::ChannelUpdate,
+ },
/// Broadcast an error downstream to be handled
HandleError {
/// The node_id of the node which should receive this message