use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
// If the channel_update we're going to return is disabled (i.e. the
// peer has been disabled for some time), return `channel_disabled`,
// otherwise return `temporary_channel_failure`.
let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
- if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+ if chan_update_opt.as_ref().map(|u| u.contents.channel_flags & 2 == 2).unwrap_or(false) {
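+ // Bit 1 (mask 2) of `channel_flags` is the BOLT 7 `disable` bit, so
+ // `channel_flags & 2 == 2` means the counterparty has marked the channel disabled.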
return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
} else {
return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
chain_hash: self.chain_hash,
short_channel_id,
timestamp: chan.context.get_update_time_counter(),
- flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
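+ // BOLT 7 splits the old `flags` byte in two: `message_flags` bit 0 is
+ // `must_be_one` (formerly `option_channel_htlc_max`), while `channel_flags`
+ // carries the direction bit (bit 0) and the `disable` bit (bit 1).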
+ message_flags: 1, // Only must_be_one
+ channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
invoice, payment_id, &self.router, self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
- best_block_height, &self.logger, &self.pending_events,
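+ // `self` doubles as the `NodeIdLookUp` (see the impl below), which together with
+ // `secp_ctx` lets payment sending resolve a blinded path's introduction node
+ // when it is referenced only by `short_channel_id`.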
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+ &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
|args| self.send_payment_along_path(args)
)
}
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.channel_id;
- if let Err((pk, err)) = self.claim_funds_from_hop(
+ self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
}
- ) {
- if let msgs::ErrorAction::IgnoreError = err.err.action {
- // We got a temporary failure updating monitor, but will claim the
- // HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
- log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
- } else { errs.push((pk, err)); }
- }
+ );
}
}
if !valid_mpp {
}
}
- fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
- prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
- -> Result<(), (PublicKey, MsgHandleErrInternal)> {
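+ // Note that this can no longer fail: a temporary monitor-update failure is left
+ // in flight and the HTLC is claimed once monitor updating is restored (or on
+ // chain), so callers need not handle an error path.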
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+ &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+ completion_action: ComplFunc,
+ ) {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// If we haven't yet run background events assume we're still deserializing and shouldn't
let action = if let Some(action) = completion_action(None, true) {
action
} else {
- return Ok(());
+ return;
};
mem::drop(peer_state_lock);
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
- return Ok(());
+ return;
};
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
}
}
}
- return Ok(());
+ return;
}
}
}
// generally always allowed to be duplicative (and it's specifically noted in
// `PaymentForwarded`).
self.handle_monitor_update_completion_actions(completion_action(None, false));
- Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+ self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
})
}
});
- if let Err((pk, err)) = res {
- let result: Result<(), _> = Err(err);
- let _ = handle_error!(self, result, pk);
- }
},
}
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
- let msg_from_node_one = msg.contents.flags & 1 == 0;
+ let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
}
#[cfg(splicing)]
- fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
+ fn handle_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported".to_owned(),
msg.channel_id.clone())), *counterparty_node_id);
// Quiescence
&events::MessageSendEvent::SendStfu { .. } => false,
// Splicing
- &events::MessageSendEvent::SendSplice { .. } => false,
+ &events::MessageSendEvent::SendSpliceInit { .. } => false,
&events::MessageSendEvent::SendSpliceAck { .. } => false,
&events::MessageSendEvent::SendSpliceLocked { .. } => false,
// Interactive Transaction Construction
},
}
},
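+ // Static invoices are used for async payments; until they are supported,
+ // reply with an `invoice_error` whenever the message included a reply path.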
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(_invoice) => {
+ match responder {
+ Some(responder) => {
+ responder.respond(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Static invoices not yet supported".to_string())
+ ))
+ },
+ None => return ResponseInstruction::NoResponse,
+ }
+ },
OffersMessage::InvoiceError(invoice_error) => {
log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
ResponseInstruction::NoResponse
}
}
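+// Stub `AsyncPaymentsMessageHandler` for `ChannelManager`: held-HTLC notifications
+// are ignored and no async payments messages are queued until the feature (gated
+// behind the `async_payments` cfg) is implemented.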
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
+ fn held_htlc_available(
+ &self, _message: HeldHtlcAvailable, _responder: Option<Responder>
+ ) -> ResponseInstruction<ReleaseHeldHtlc> {
+ ResponseInstruction::NoResponse
+ }
+
+ fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+
+ fn release_pending_messages(&self) -> Vec<PendingOnionMessage<AsyncPaymentsMessage>> {
+ Vec::new()
+ }
+}
+
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
// update message and would always update the local fee info, even if our peer was
// (spuriously) forwarding us our own channel_update.
let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
- let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
- let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+ let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+ let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
// First deliver each peers' own message, checking that the node doesn't need to be
// persisted and that its channel info remains the same.