/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
/// and later, also stores information for retrying the payment.
-enum PendingOutboundPayment {
+pub(crate) enum PendingOutboundPayment {
Legacy {
session_privs: HashSet<[u8; 32]>,
},
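For reference, the `Retryable` variant that the `or_insert_with` initializer later in this diff constructs would look roughly as follows; the field types are inferred from that initializer and are a sketch, not the canonical definition:

```rust
// Sketch of the Retryable variant, with field types inferred from the
// or_insert_with initializer further down in this diff.
Retryable {
	session_privs: HashSet<[u8; 32]>,
	payment_hash: PaymentHash,
	payment_secret: Option<PaymentSecret>,
	pending_amt_msat: u64,
	total_msat: u64,
	starting_block_height: u32,
},
```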
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
+ /// Helper function that issues the channel close events
+ fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
+ let mut pending_events_lock = self.pending_events.lock().unwrap();
+		if let Some(transaction) = channel.unbroadcasted_funding() {
+			pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction });
+		}
+ pending_events_lock.push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: closure_reason });
+ }
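A consumer of these events will see a `DiscardFunding` (only when the funding transaction was never broadcast) followed by the `ChannelClosed`. A minimal sketch of the corresponding arms in a user's event-handling match, assuming only the event variants shown above:

```rust
// Sketch of consumer-side handling for the two events this helper emits.
match event {
	events::Event::DiscardFunding { channel_id, transaction } => {
		// The funding transaction was never broadcast, so it is safe to
		// forget it (or double-spend its inputs to reclaim any funds).
		println!("Discarding funding tx {} for channel {:?}", transaction.txid(), channel_id);
	},
	events::Event::ChannelClosed { channel_id, reason } => {
		println!("Channel {:?} closed: {:?}", channel_id, reason);
	},
	_ => {},
}
```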
+
fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
msg: channel_update
});
}
- if let Ok(mut pending_events_lock) = self.pending_events.lock() {
- pending_events_lock.push(events::Event::ChannelClosed {
- channel_id: *channel_id,
- reason: ClosureReason::HolderForceClosed
- });
- }
+ self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
}
break Ok(());
},
if let Some(short_id) = chan.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- let mut pending_events_lock = self.pending_events.lock().unwrap();
if peer_node_id.is_some() {
if let Some(peer_msg) = peer_msg {
- pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+					self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
}
} else {
- pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+				self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
}
chan.remove_entry().1
} else {
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
- let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
- session_privs: HashSet::new(),
- pending_amt_msat: 0,
- payment_hash: *payment_hash,
- payment_secret: *payment_secret,
- starting_block_height: self.best_block.read().unwrap().height(),
- total_msat: total_value,
- });
- assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
let err: Result<(), _> = loop {
let mut channel_lock = self.channel_state.lock().unwrap();
if !chan.get().is_live() {
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
}
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- payment_id,
- }, onion_packet, &self.logger), channel_state, chan)
+ let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+ htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ }, onion_packet, &self.logger),
+ channel_state, chan);
+
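+			// If send_htlc_and_commit errored, break_chan_entry! above broke out of the
+			// loop, so reaching this point means the HTLC was actually sent and it is
+			// now safe to record the pending payment for potential retries.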
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
+ session_privs: HashSet::new(),
+ pending_amt_msat: 0,
+ payment_hash: *payment_hash,
+ payment_secret: *payment_secret,
+ starting_block_height: self.best_block.read().unwrap().height(),
+ total_msat: total_value,
+ });
+ assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
+
+ send_res
} {
Some((update_add, commitment_signed, monitor_update)) => {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
}
} else {
return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment with ID {} not found".to_string()
+ err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
}))
}
};
network_update: None,
all_paths_failed: payment.get().remaining_parts() == 0,
path: path.clone(),
+ short_channel_id: None,
#[cfg(test)]
error_code: None,
#[cfg(test)]
match &onion_error {
&HTLCFailReason::LightningError { ref err } => {
#[cfg(test)]
- let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
#[cfg(not(test))]
- let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
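+					// `short_channel_id`, if set, identifies the hop from which the failure
+					// originated, letting PaymentPathFailed consumers avoid it on retry.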
// TODO: If we decided to blame ourselves (or one of our channels) in
// process_onion_failure we should close that channel as it implies our
// next-hop is needlessly blaming us!
network_update,
all_paths_failed,
path: path.clone(),
+ short_channel_id,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
network_update: None,
all_paths_failed,
path: path.clone(),
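+						// The failure was generated locally, i.e. at the first hop in the path.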
+ short_channel_id: Some(path.first().unwrap().short_channel_id),
#[cfg(test)]
error_code: Some(*failure_code),
#[cfg(test)]
sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat)
} else { false };
if found_payment {
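+				// By protocol, the payment hash is the SHA256 of the payment preimage,
+				// so it can be reconstructed here rather than stored separately.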
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
self.pending_events.lock().unwrap().push(
- events::Event::PaymentSent { payment_preimage }
+ events::Event::PaymentSent {
+ payment_preimage,
+					payment_hash,
+ }
);
} else {
log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
+ }
+ return res
}
funding_tx
},
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
+ self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
}
Ok(())
}
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
+ self.issue_channel_close_events(&chan, ClosureReason::CommitmentTxConfirmed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
});
}
- if let Ok(mut pending_events_lock) = self.pending_events.lock() {
- pending_events_lock.push(events::Event::ChannelClosed {
- channel_id: *channel_id,
- reason: ClosureReason::CooperativeClosure
- });
- }
+ self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transaction(&tx);
self.process_pending_events(&event_handler);
events.into_inner()
}
+
+ #[cfg(test)]
+ pub fn has_pending_payments(&self) -> bool {
+ !self.pending_outbound_payments.lock().unwrap().is_empty()
+ }
}
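A retry-oriented functional test can use this helper to check that a payment is tracked while in flight and forgotten once it resolves. Hypothetical usage, assuming the usual `nodes` fixture from the functional test utilities:

```rust
// Hypothetical test usage: the payment is tracked while in flight and
// should be removed from the map once it is claimed or fully failed.
nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
assert!(nodes[0].node.has_pending_payments());
// ... deliver, then claim or fail the HTLC ...
// assert!(!nodes[0].node.has_pending_payments());
```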
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
+ self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
false
} else {
true
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
return false;
} else {
no_channels_remain = false;
None => continue,
}
}
+ if forward_htlcs_count > 0 {
+ // If we have pending HTLCs to forward, assume we either dropped a
+ // `PendingHTLCsForwardable` or the user received it but never processed it as they
+ // shut down before the timer hit. Either way, set the time_forwardable to a small
+ // constant as enough time has likely passed that we should simply handle the forwards
+ // now, or at least after the user gets a chance to reconnect to our peers.
+ pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: Duration::from_secs(2),
+ });
+ }
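On the consumer side, handling of the regenerated event is the same as for a live one: wait roughly `time_forwardable`, then drive the forwards. A sketch of the relevant event-handler arm, assuming an `Arc`-wrapped `ChannelManager` named `channel_manager`:

```rust
// Sketch: handle the regenerated PendingHTLCsForwardable like any other.
events::Event::PendingHTLCsForwardable { time_forwardable } => {
	let forwarding_cm = Arc::clone(&channel_manager);
	std::thread::spawn(move || {
		// Waiting up to time_forwardable batches forwards for privacy;
		// forwarding sooner is also acceptable.
		std::thread::sleep(time_forwardable);
		forwarding_cm.process_pending_htlc_forwards();
	});
},
```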
let background_event_count: u64 = Readable::read(reader)?;
let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- let logger = test_utils::TestLogger::new();
// First, send a partial MPP payment.
- let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
- let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
- let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+ let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
let payment_id = PaymentId([42; 32]);
// Use the utility function send_payment_along_path to send the payment with MPP data which
// indicates there are more HTLCs coming.
// further events will be generated for subsequence path successes.
let events = nodes[0].node.get_and_clear_pending_events();
match events[0] {
- Event::PaymentSent { payment_preimage: ref preimage } => {
+ Event::PaymentSent { payment_preimage: ref preimage, payment_hash: ref hash } => {
assert_eq!(payment_preimage, *preimage);
+ assert_eq!(our_payment_hash, *hash);
},
_ => panic!("Unexpected event"),
}
let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let logger = test_utils::TestLogger::new();
// Marshall an MPP route.
- let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
- let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
- let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
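	// Each copy of the path is then pointed at a different first hop so the two
	// MPP shards take distinct routes to the recipient.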