use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
}
} else {
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
- shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
}
},
hash_map::Entry::Vacant(_) => {
let closure_reason = if let Some(peer_msg) = peer_msg {
ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
} else {
- ClosureReason::HolderForceClosed
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
};
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
invoice, payment_id, &self.router, self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
- best_block_height, &self.logger, &self.pending_events,
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+ &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
|args| self.send_payment_along_path(args)
)
}
if short_chan_id != 0 {
let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
- () => {
- for forward_info in pending_forwards.drain(..) {
+ ($forward_infos: expr) => {
+ for forward_info in $forward_infos {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
- let logger = WithChannelContext::from(&self.logger, &chan.context, None);
- for forward_info in pending_forwards.drain(..) {
- let queue_fail_htlc_res = match forward_info {
- HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
- prev_user_channel_id, forward_info: PendingHTLCInfo {
- incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward {
- onion_packet, blinded, ..
- }, skimmed_fee_msat, ..
+ let mut draining_pending_forwards = pending_forwards.drain(..);
+ while let Some(forward_info) = draining_pending_forwards.next() {
+ let queue_fail_htlc_res = match forward_info {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward {
+ ref onion_packet, blinded, ..
+ }, skimmed_fee_msat, ..
+ },
+ }) => {
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ blinded_failure: blinded.map(|b| b.failure),
+ });
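+ // For forwards along a blinded path, derive the blinding point to hand to the
+ // next hop by tweaking the inbound blinding point with the ECDH shared secret
+ // between it and our node key.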
+ let next_blinding_point = blinded.and_then(|b| {
+ let encrypted_tlvs_ss = self.node_signer.ecdh(
+ Recipient::Node, &b.inbound_blinding_point, None
+ ).unwrap().secret_bytes();
+ onion_utils::next_hop_pubkey(
+ &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+ ).ok()
+ });
+
+ // Forward the HTLC over the most appropriate channel with the corresponding peer,
+ // applying non-strict forwarding.
+ // The channel with the least amount of outbound liquidity will be used to maximize the
+ // probability of being able to successfully forward a subsequent HTLC.
+ let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
+ ChannelPhase::Funded(chan) => {
+ let balances = chan.context.get_available_balances(&self.fee_estimator);
+ if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
+ outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
+ chan.context.is_usable() {
+ Some((chan, balances))
+ } else {
+ None
+ }
},
- }) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
- log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- user_channel_id: Some(prev_user_channel_id),
- channel_id: prev_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- blinded_failure: blinded.map(|b| b.failure),
- });
- let next_blinding_point = blinded.and_then(|b| {
- let encrypted_tlvs_ss = self.node_signer.ecdh(
- Recipient::Node, &b.inbound_blinding_point, None
- ).unwrap().secret_bytes();
- onion_utils::next_hop_pubkey(
- &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
- ).ok()
- });
- if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
- payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
- &&logger)
- {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+ _ => None,
+ }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
+ let optimal_channel = match maybe_optimal_channel {
+ Some(chan) => chan,
+ None => {
+ // Fall back to the specified channel to return an appropriate error.
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ chan
} else {
- panic!("Stated return value requirements in send_htlc() were not met");
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
+ }
+ };
+
+ let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
+ let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
+ "specified"
+ } else {
+ "alternate"
+ };
+ log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
+ prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
+ if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
+ payment_hash, outgoing_cltv_value, htlc_source.clone(),
+ onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+ &&logger)
+ {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
- continue;
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
- None
- },
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ }
+ None
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
- },
- HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
let res = chan.queue_fail_malformed_htlc(
htlc_id, failure_code, sha256_of_onion, &&logger
);
Some((res, htlc_id))
- },
- };
- if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
- if let Err(e) = queue_fail_htlc_res {
- if let ChannelError::Ignore(msg) = e {
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK; in that case the channel is on
+ // the chain and sending the HTLC-Timeout is their problem.
+ continue;
}
}
- } else {
- forwarding_channel_not_found!();
- continue;
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
- shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.channel_id;
- if let Err((pk, err)) = self.claim_funds_from_hop(
+ self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
}
- ) {
- if let msgs::ErrorAction::IgnoreError = err.err.action {
- // We got a temporary failure updating monitor, but will claim the
- // HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
- log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
- } else { errs.push((pk, err)); }
- }
+ );
}
}
if !valid_mpp {
}
}
- fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
- prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
- -> Result<(), (PublicKey, MsgHandleErrInternal)> {
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+ &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+ completion_action: ComplFunc,
+ ) {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// If we haven't yet run background events assume we're still deserializing and shouldn't
let action = if let Some(action) = completion_action(None, true) {
action
} else {
- return Ok(());
+ return;
};
mem::drop(peer_state_lock);
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
- return Ok(());
+ return;
};
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
}
}
}
- return Ok(());
+ return;
}
}
}
// generally always allowed to be duplicative (and it's specifically noted in
// `PaymentForwarded`).
self.handle_monitor_update_completion_actions(completion_action(None, false));
- Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+ self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
})
}
});
- if let Err((pk, err)) = res {
- let result: Result<(), _> = Err(err);
- let _ = handle_error!(self, result, pk);
- }
},
}
}
log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
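+ // Only proceed with completion once the channel is awaiting a monitor update and
+ // no in-flight monitor updates remain.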
- if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
+ if !channel.is_awaiting_monitor_update() || remaining_in_flight != 0 {
return;
}
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel);
let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
reason
} else {
- ClosureReason::HolderForceClosed
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
};
failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
}
#[cfg(splicing)]
- fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
+ fn handle_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported".to_owned(),
msg.channel_id.clone())), *counterparty_node_id);
// Quiescence
&events::MessageSendEvent::SendStfu { .. } => false,
// Splicing
- &events::MessageSendEvent::SendSplice { .. } => false,
+ &events::MessageSendEvent::SendSpliceInit { .. } => false,
&events::MessageSendEvent::SendSpliceAck { .. } => false,
&events::MessageSendEvent::SendSpliceLocked { .. } => false,
// Interactive Transaction Construction
},
}
},
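+ // Static invoices arrive as part of the async payments flow; until they are
+ // supported we simply reply with an invoice error.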
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(_invoice) => {
+ match responder {
+ Some(responder) => {
+ responder.respond(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Static invoices not yet supported".to_string())
+ ))
+ },
+ None => return ResponseInstruction::NoResponse,
+ }
+ },
OffersMessage::InvoiceError(invoice_error) => {
log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
ResponseInstruction::NoResponse
}
}
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
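+ // Async payments are not yet supported, so these handlers are currently no-ops.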
+ fn held_htlc_available(
+ &self, _message: HeldHtlcAvailable, _responder: Option<Responder>
+ ) -> ResponseInstruction<ReleaseHeldHtlc> {
+ ResponseInstruction::NoResponse
+ }
+
+ fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+
+ fn release_pending_messages(&self) -> Vec<PendingOnionMessage<AsyncPaymentsMessage>> {
+ Vec::new()
+ }
+}
+
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
// Confirm that the channel_update was not sent immediately to node[1] but was cached.
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
let txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);