Merge pull request #3144 from TheBlueMatt/2024-06-message-flags
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index be2b599e6af31f8e45a7d7d17988ea2756d7defd..807dce7cd4536c2307f74df5190445549a0ebdac 100644
@@ -66,6 +66,7 @@ use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
 use crate::offers::offer::{Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
 use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
 use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
 use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
@@ -3505,7 +3506,7 @@ where
                        // peer has been disabled for some time), return `channel_disabled`,
                        // otherwise return `temporary_channel_failure`.
                        let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
-                       if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+                       if chan_update_opt.as_ref().map(|u| u.contents.channel_flags & 2 == 2).unwrap_or(false) {
                                return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
                        } else {
                                return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
@@ -3784,7 +3785,8 @@ where
                        chain_hash: self.chain_hash,
                        short_channel_id,
                        timestamp: chan.context.get_update_time_counter(),
-                       flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
+                       message_flags: 1, // Only must_be_one
+                       channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
                        cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
                        htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
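This hunk is the core of the change: the single `flags` byte is split into the two bytes the BOLT 7 `channel_update` wire format actually defines. Bit 0 of `message_flags` is `must_be_one` (formerly `option_channel_htlc_max`; always set now that `htlc_maximum_msat` is mandatory), while `channel_flags` keeps bit 0 (`direction`) and bit 1 (`disable`). A minimal sketch of the derivation, in plain Rust with an invented helper name:

```rust
/// Sketch only: builds (message_flags, channel_flags) as the hunk above does.
fn build_flags(were_node_one: bool, enabled: bool) -> (u8, u8) {
    let message_flags: u8 = 1; // bit 0: must_be_one
    let channel_flags: u8 = (!were_node_one as u8) // bit 0: direction (0 = node_1)
        | ((!enabled as u8) << 1); // bit 1: disable
    (message_flags, channel_flags)
}

fn main() {
    assert_eq!(build_flags(true, true), (1, 0));   // from node_1, enabled
    assert_eq!(build_flags(false, false), (1, 3)); // from node_2, disabled
}
```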
@@ -4031,8 +4033,8 @@ where
                self.pending_outbound_payments
                        .send_payment_for_bolt12_invoice(
                                invoice, payment_id, &self.router, self.list_usable_channels(),
-                               || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
-                               best_block_height, &self.logger, &self.pending_events,
+                               || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+                               &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
                                |args| self.send_payment_along_path(args)
                        )
        }
@@ -6211,21 +6213,13 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
-                               let prev_hop_chan_id = htlc.prev_hop.channel_id;
-                               if let Err((pk, err)) = self.claim_funds_from_hop(
+                               self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
                                                debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
                                                Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
                                        }
-                               ) {
-                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
-                                               // We got a temporary failure updating monitor, but will claim the
-                                               // HTLC when the monitor updating is restored (or on chain).
-                                               let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
-                                               log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
-                                       } else { errs.push((pk, err)); }
-                               }
+                               );
                        }
                }
                if !valid_mpp {
@@ -6247,9 +6241,10 @@ where
                }
        }
 
-       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
-               prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
-       -> Result<(), (PublicKey, MsgHandleErrInternal)> {
+       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+               &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+               completion_action: ComplFunc,
+       ) {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
                // If we haven't yet run background events assume we're still deserializing and shouldn't
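`claim_funds_from_hop` becomes infallible: the caller-side handling deleted above (and at the second call site further down) either logged a temporary monitor-update failure as success or forwarded the error, and that decision now lives inside the function itself, so the `Result` and the plumbing at both call sites disappear. A generic sketch of the pattern, with stand-in names rather than LDK code:

```rust
/// Sketch only: absorbing a recoverable error in the callee so callers
/// need no error plumbing.
fn fallible_step() -> Result<(), String> { Ok(()) }

// Before: every caller matched on the Result just to ignore one case.
fn claim_before() -> Result<(), String> { fallible_step() }

// After: the callee logs and continues; the claim completes once the
// monitor update is restored (or on chain), so it is treated as success.
fn claim_after() {
    if let Err(e) = fallible_step() {
        eprintln!("Temporary failure claiming HTLC, treating as success: {e}");
    }
}

fn main() { let _ = claim_before(); claim_after(); }
```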
@@ -6311,7 +6306,7 @@ where
                                                                let action = if let Some(action) = completion_action(None, true) {
                                                                        action
                                                                } else {
-                                                                       return Ok(());
+                                                                       return;
                                                                };
                                                                mem::drop(peer_state_lock);
 
@@ -6327,7 +6322,7 @@ where
                                                                } else {
                                                                        debug_assert!(false,
                                                                                "Duplicate claims should always free another channel immediately");
-                                                                       return Ok(());
+                                                                       return;
                                                                };
                                                                if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
                                                                        let mut peer_state = peer_state_mtx.lock().unwrap();
@@ -6352,7 +6347,7 @@ where
                                                        }
                                                }
                                        }
-                                       return Ok(());
+                                       return;
                                }
                        }
                }
@@ -6400,7 +6395,6 @@ where
                // generally always allowed to be duplicative (and it's specifically noted in
                // `PaymentForwarded`).
                self.handle_monitor_update_completion_actions(completion_action(None, false));
-               Ok(())
        }
 
        fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -6433,7 +6427,7 @@ where
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
                                #[cfg(debug_assertions)]
                                let claiming_chan_funding_outpoint = hop_data.outpoint;
-                               let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+                               self.claim_funds_from_hop(hop_data, payment_preimage,
                                        |htlc_claim_value_msat, definitely_duplicate| {
                                                let chan_to_release =
                                                        if let Some(node_id) = next_channel_counterparty_node_id {
@@ -6527,10 +6521,6 @@ where
                                                        })
                                                }
                                        });
-                               if let Err((pk, err)) = res {
-                                       let result: Result<(), _> = Err(err);
-                                       let _ = handle_error!(self, result, pk);
-                               }
                        },
                }
        }
@@ -7932,7 +7922,7 @@ where
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                        }
                                        let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
-                                       let msg_from_node_one = msg.contents.flags & 1 == 0;
+                                       let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
                                        if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersistNoEvents);
                                        } else {
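Bit 0 of `channel_flags` is the `direction` bit: 0 means the update was signed by `node_1`, the endpoint whose serialized compressed public key is lexicographically lesser (the same ordering the test at the bottom of this diff relies on). Comparing it against `were_node_one` lets us ignore a peer spuriously echoing our own update back to us on a private channel. A standalone sketch with invented helper names:

```rust
/// Sketch only: BOLT 7 calls the endpoint with the lesser serialized
/// compressed pubkey node_1.
fn we_are_node_one(our_pk: &[u8; 33], their_pk: &[u8; 33]) -> bool {
    our_pk[..] < their_pk[..]
}

/// An update with channel_flags bit 0 clear originated from node_1, so it
/// is our own update iff that matches our side of the ordering.
fn update_is_ours(were_node_one: bool, channel_flags: u8) -> bool {
    were_node_one == (channel_flags & 1 == 0)
}
```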
@@ -9704,7 +9694,7 @@ where
        }
 
        #[cfg(splicing)]
-       fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
+       fn handle_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported".to_owned(),
                         msg.channel_id.clone())), *counterparty_node_id);
@@ -9911,7 +9901,7 @@ where
                                                // Quiescence
                                                &events::MessageSendEvent::SendStfu { .. } => false,
                                                // Splicing
-                                               &events::MessageSendEvent::SendSplice { .. } => false,
+                                               &events::MessageSendEvent::SendSpliceInit { .. } => false,
                                                &events::MessageSendEvent::SendSpliceAck { .. } => false,
                                                &events::MessageSendEvent::SendSpliceLocked { .. } => false,
                                                // Interactive Transaction Construction
@@ -10389,6 +10379,17 @@ where
                                        },
                                }
                        },
+                       #[cfg(async_payments)]
+                       OffersMessage::StaticInvoice(_invoice) => {
+                               match responder {
+                                       Some(responder) => {
+                                               responder.respond(OffersMessage::InvoiceError(
+                                                               InvoiceError::from_string("Static invoices not yet supported".to_string())
+                                               ))
+                                       },
+                                       None => return ResponseInstruction::NoResponse,
+                               }
+                       },
                        OffersMessage::InvoiceError(invoice_error) => {
                                log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
                                ResponseInstruction::NoResponse
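The new `StaticInvoice` arm follows the messenger's reply-path convention: reply only if the sender supplied a `Responder`; otherwise there is nowhere to send the `InvoiceError`. A reduced sketch of that shape, with stand-in types rather than LDK's `Responder`/`ResponseInstruction`:

```rust
/// Sketch only: the respond-or-stay-silent pattern used above.
enum Response<T> { Reply(T), NoResponse }

struct Responder;
impl Responder {
    fn respond<T>(self, msg: T) -> Response<T> { Response::Reply(msg) }
}

fn handle<T>(error_reply: T, responder: Option<Responder>) -> Response<T> {
    match responder {
        Some(r) => r.respond(error_reply), // the sender included a reply path
        None => Response::NoResponse,      // no reply path: drop silently
    }
}
```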
@@ -10401,6 +10402,31 @@ where
        }
 }
 
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+       T::Target: BroadcasterInterface,
+       ES::Target: EntropySource,
+       NS::Target: NodeSigner,
+       SP::Target: SignerProvider,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
+{
+       fn held_htlc_available(
+               &self, _message: HeldHtlcAvailable, _responder: Option<Responder>
+       ) -> ResponseInstruction<ReleaseHeldHtlc> {
+               ResponseInstruction::NoResponse
+       }
+
+       fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+
+       fn release_pending_messages(&self) -> Vec<PendingOnionMessage<AsyncPaymentsMessage>> {
+               Vec::new()
+       }
+}
+
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
 NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
@@ -12237,8 +12263,8 @@ mod tests {
                // update message and would always update the local fee info, even if our peer was
                // (spuriously) forwarding us our own channel_update.
                let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
-               let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
-               let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+               let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+               let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
 
                // First deliver each peers' own message, checking that the node doesn't need to be
                // persisted and that its channel info remains the same.