self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
- /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
- /// will be accepted on the given channel, and after additional timeout/the closing of all
- /// pending HTLCs, the channel will be closed on chain.
- ///
- /// May generate a SendShutdown message event on success, which should be relayed.
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let counterparty_node_id;
Some(peer_state) => {
let peer_state = peer_state.lock().unwrap();
let their_features = &peer_state.latest_features;
- chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features)?
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
},
None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
};
Ok(())
}
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// * If we are the channel initiator, we will pay a fee between our [`Background`] fee
+ ///    estimate and [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our
+ ///    [`Normal`] fee estimate.
+ /// * If our counterparty is the channel initiator, we will require a channel closing
+ ///    transaction feerate of at least the lower of our [`Background`] feerate and the
+ ///    feerate which would appear on a force-closure transaction. We will, however, allow
+ ///    our counterparty to pay as much fee as they'd like.
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
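+ /// A minimal usage sketch; the `channel_manager` and `channel_id` bindings are hypothetical
+ /// and assumed to come from prior setup:
+ ///
+ /// ```ignore
+ /// // Request a cooperative close; the closing fee is chosen within the bounds above.
+ /// channel_manager.close_channel(&channel_id)?;
+ /// ```
+ ///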
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, None)
+ }
+
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// `target_feerate_sats_per_1000_weight` has different meanings depending on whether we
+ /// initiated the channel being closed:
+ /// * If we are the channel initiator, we will pay at least this feerate on the closing
+ ///    transaction. The upper bound is set by
+ ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+ ///    estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
+ /// * If our counterparty is the channel initiator, we will refuse to accept a channel
+ ///    closing transaction feerate below the lower of `target_feerate_sats_per_1000_weight`
+ ///    and the feerate which will appear on a force-closure transaction.
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
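+ /// A minimal usage sketch; the `channel_manager` and `channel_id` bindings are hypothetical
+ /// and assumed to come from prior setup:
+ ///
+ /// ```ignore
+ /// // Ask for at least 1_000 sats per 1000 weight units on the closing transaction.
+ /// channel_manager.close_channel_with_target_feerate(&channel_id, 1_000)?;
+ /// ```
+ ///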
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+ }
+
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
- let (next_hop_data, next_hop_hmac) = {
- match msgs::OnionHopData::read(&mut chacha_stream) {
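+ // Fully-qualified syntax pins the decode to the `Readable` implementation.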
+ let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
+ match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
Err(err) => {
let error_code = match err {
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
let pending_msg_events = &mut channel_state.pending_msg_events;
let short_to_id = &mut channel_state.short_to_id;
channel_state.by_id.retain(|chan_id, chan| {
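+ // Apply any needed feerate update first; if the update errors, the channel may be removed.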
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push((err, counterparty_node_id));
+ }
+ if !retain_channel { return false; }
+
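+ // If the cooperative close negotiation has made no progress for too long, error the
+ // channel (which may force-close it).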
+ if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+ let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ handle_errors.push((Err(err), chan.get_counterparty_node_id()));
+ if needs_close { return false; }
+ }
+
match chan.channel_update_status() {
ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
_ => {},
}
- let counterparty_node_id = chan.get_counterparty_node_id();
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
- if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
- if err.is_err() {
- handle_errors.push((err, counterparty_node_id));
- }
- retain_channel
+ true
});
}
for (err, counterparty_node_id) in handle_errors.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
-
should_persist
});
}
events::Event::PaymentFailed {
payment_hash,
rejected_by_dest: false,
+ network_update: None,
#[cfg(test)]
error_code: None,
#[cfg(test)]
match &onion_error {
&HTLCFailReason::LightningError { ref err } => {
#[cfg(test)]
- let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
#[cfg(not(test))]
- let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
// TODO: If we decided to blame ourselves (or one of our channels) in
// process_onion_failure we should close that channel as it implies our
// next-hop is needlessly blaming us!
- if let Some(update) = channel_update {
- self.channel_state.lock().unwrap().pending_msg_events.push(
- events::MessageSendEvent::PaymentFailureNetworkUpdate {
- update,
- }
- );
- }
self.pending_events.lock().unwrap().push(
events::Event::PaymentFailed {
payment_hash: payment_hash.clone(),
rejected_by_dest: !payment_retryable,
+ network_update,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
ref data,
.. } => {
// we get a fail_malformed_htlc from the first hop
- // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
+ // TODO: We'd like to generate a NetworkUpdate for temporary
// failures here, but that would be insufficient as get_route
// generally ignores its view of our own channels as we provide them via
// ChannelDetails.
events::Event::PaymentFailed {
payment_hash: payment_hash.clone(),
rejected_by_dest: path.len() == 1,
+ network_update: None,
#[cfg(test)]
error_code: Some(*failure_code),
#[cfg(test)]
}
if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our couterparty for channel {}{}.",
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
log_bytes!(msg.channel_id),
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
}
let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
- // Ensure error_code has the UPDATE flag set, since by default we send a
- // channel update along as part of failing the HTLC.
- assert!((error_code & 0x1000) != 0);
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
match pending_forward_info {
PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
- let mut res = Vec::with_capacity(8 + 128);
- // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
- res.extend_from_slice(&byte_utils::be16_to_array(0));
- res.extend_from_slice(&upd.encode_with_len()[..]);
- res
- }[..])
+ let reason = if (error_code & 0x1000) != 0 {
+ if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
+ let mut res = Vec::with_capacity(8 + 128);
+ // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+ res.extend_from_slice(&byte_utils::be16_to_array(0));
+ res.extend_from_slice(&upd.encode_with_len()[..]);
+ res
+ }[..])
+ } else {
+ // The only case where we'd be unable to
+ // successfully get a channel update is if the
+ // channel isn't in the fully-funded state yet,
+ // implying our counterparty is trying to route
+ // payments over the channel back to themselves
+ // (because no one else should know the short_id
+ // is a lightning channel yet). We should have
+ // no problem just calling this
+ // unknown_next_peer (0x4000|10).
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+ }
} else {
- // The only case where we'd be unable to
- // successfully get a channel update is if the
- // channel isn't in the fully-funded state yet,
- // implying our counterparty is trying to route
- // payments over the channel back to themselves
- // (cause no one else should know the short_id
- // is a lightning channel yet). We should have
- // no problem just calling this
- // unknown_next_peer (0x4000|10).
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
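+ // Without the UPDATE flag there is no channel_update to attach, so send
+ // empty failure data.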
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
};
let msg = msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
- let event_handler = |event| events.borrow_mut().push(event);
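+ // Events are now delivered by reference, so clone each one into the local buffer.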
+ let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
self.process_pending_events(&event_handler);
events.into_inner()
}
}
for event in pending_events.drain(..) {
- handler.handle_event(event);
+ handler.handle_event(&event);
}
result
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
None => continue,
}
}
+ if forward_htlcs_count > 0 {
+ // If we have pending HTLCs to forward, assume we either dropped a
+ // `PendingHTLCsForwardable` or the user received it but never processed it as they
+ // shut down before the timer hit. Either way, set the time_forwardable to a small
+ // constant as enough time has likely passed that we should simply handle the forwards
+ // now, or at least after the user gets a chance to reconnect to our peers.
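+ // (Consumers of this event are expected to wait `time_forwardable` and then call
+ // `ChannelManager::process_pending_htlc_forwards`.)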
+ pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: Duration::from_secs(2),
+ });
+ }
let background_event_count: u64 = Readable::read(reader)?;
let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
// First, send a partial MPP payment.
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
- let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+ let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
// Use the utility function send_payment_along_path to send the payment with MPP data which
// indicates there are more HTLCs coming.
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
// Next, attempt a keysend payment and make sure it fails.
- let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
// To start (2), send a keysend payment but don't claim it.
let payment_preimage = PaymentPreimage([42; 32]);
- let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
- let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+ let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
let first_hops = nodes[0].node.list_usable_channels();
- let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+ let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
nodes[0].logger).unwrap();
nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
- let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+ let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
let first_hops = nodes[0].node.list_usable_channels();
- let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+ let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
nodes[0].logger).unwrap();
// Marshall an MPP route.
let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
- let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();