Merge pull request #2820 from TheBlueMatt/2024-01-fuzz-ooo-monitor-updates
author    valentinewallace <valentinewallace@users.noreply.github.com>
          Thu, 11 Jan 2024 16:22:34 +0000 (11:22 -0500)
committer GitHub <noreply@github.com>
          Thu, 11 Jan 2024 16:22:34 +0000 (11:22 -0500)
Test individual monitor update completion in chanmon_consistency fuzzer

20 files changed:
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/onion_message.rs
lightning-transaction-sync/src/electrum.rs
lightning/src/blinded_path/message.rs
lightning/src/blinded_path/utils.rs
lightning/src/events/mod.rs
lightning/src/ln/channel.rs
lightning/src/ln/channel_keys.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/peer_handler.rs
lightning/src/onion_message/functional_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/onion_message/mod.rs
lightning/src/onion_message/offers.rs
lightning/src/routing/router.rs
lightning/src/util/test_utils.rs

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 7d8f373096f73012eebe8cd1e903cccb1b96077d..89ff07fe698268a9f8fff51f1b298c124b46e73b 100644
@@ -48,7 +48,7 @@ use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
 use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
 use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index a2ce98cf4d22c9bebaf40ff895b98f88d7b8da2e..1f5ceb2123517a5b14559ee5e82adc085fe576b7 100644
@@ -45,7 +45,7 @@ use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
 use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
 use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
index d2d60cfcf640593e2e38d264eac6eeb0c2abafad..4ad770fc20c8baa4e2a7a2cd1da8df3ff814a818 100644
@@ -16,7 +16,9 @@ use lightning::sign::{Recipient, KeyMaterial, EntropySource, NodeSigner, SignerP
 use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::logger::Logger;
 use lightning::util::ser::{Readable, Writeable, Writer};
-use lightning::onion_message::{CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessageContents, OnionMessagePath, OnionMessenger, PendingOnionMessage};
+use lightning::onion_message::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage};
+use lightning::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use lightning::onion_message::packet::OnionMessageContents;
 
 use crate::utils::test_logger;
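
The `onion_message` types are no longer re-exported from the module root, so downstream imports must name the new submodules. A minimal sketch of the updated paths, taken from the hunks above:

```rust
// Messenger plumbing, offers handling, and packet contents now live in
// dedicated submodules of lightning::onion_message.
use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
use lightning::onion_message::offers::{OffersMessage, OffersMessageHandler};
use lightning::onion_message::packet::OnionMessageContents;
```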
 
diff --git a/lightning-transaction-sync/src/electrum.rs b/lightning-transaction-sync/src/electrum.rs
index 07e11338905370d9385dc7d5259053b624055ea4..d0c8afef77e3161ec191f41a0ceae4aa5eb8efe0 100644
@@ -86,6 +86,7 @@ where
                let mut sync_state = self.sync_state.lock().unwrap();
 
                log_trace!(self.logger, "Starting transaction sync.");
+               #[cfg(feature = "time")]
                let start_time = Instant::now();
                let mut num_confirmed = 0;
                let mut num_unconfirmed = 0;
@@ -210,10 +211,15 @@ where
                                sync_state.pending_sync = false;
                        }
                }
+               #[cfg(feature = "time")]
                log_debug!(self.logger,
                        "Finished transaction sync at tip {} in {}ms: {} confirmed, {} unconfirmed.",
                        tip_header.block_hash(), start_time.elapsed().as_millis(), num_confirmed,
                        num_unconfirmed);
+               #[cfg(not(feature = "time"))]
+               log_debug!(self.logger,
+                       "Finished transaction sync at tip {}: {} confirmed, {} unconfirmed.",
+                       tip_header.block_hash(), num_confirmed, num_unconfirmed);
                Ok(())
        }
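
The gating pattern above, reduced to a standalone sketch: the `Instant` is only captured when the crate's optional `time` feature is enabled, and the log line degrades gracefully without it (`run_sync` is a hypothetical stand-in for the sync work):

```rust
#[cfg(feature = "time")]
use std::time::Instant;

fn run_sync() {
    // Only capture a start time when a clock is available.
    #[cfg(feature = "time")]
    let start_time = Instant::now();

    // ... perform the actual sync work here ...

    #[cfg(feature = "time")]
    println!("Finished sync in {}ms.", start_time.elapsed().as_millis());
    #[cfg(not(feature = "time"))]
    println!("Finished sync.");
}
```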
 
diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs
index d2e81444ef6307a2cac41082c444f18568af7a85..2631a05f660c4e886a6db37d9e8fa3b6e76f6708 100644
@@ -5,7 +5,7 @@ use crate::blinded_path::utils;
 use crate::io;
 use crate::io::Cursor;
 use crate::ln::onion_utils;
-use crate::onion_message::ControlTlvs;
+use crate::onion_message::packet::ControlTlvs;
 use crate::prelude::*;
 use crate::sign::{NodeSigner, Recipient};
 use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
diff --git a/lightning/src/blinded_path/utils.rs b/lightning/src/blinded_path/utils.rs
index 2e691898dc26cc187504fee5e774d4f8ce9a4af7..7ddb39c1b68fbce0a93fca0637e604b70b393b90 100644
@@ -18,7 +18,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use super::{BlindedHop, BlindedPath};
 use crate::ln::msgs::DecodeError;
 use crate::ln::onion_utils;
-use crate::onion_message::Destination;
+use crate::onion_message::messenger::Destination;
 use crate::util::chacha20poly1305rfc::ChaChaPolyWriteAdapter;
 use crate::util::ser::{Readable, Writeable};
 
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 76e5f25c0e5f6eef87b2ff48102dd19acc239231..dede02a37cb9c5e6349dfb27e7504cc3b1fb83fc 100644
@@ -540,8 +540,8 @@ pub enum Event {
        /// replies. Handlers should connect to the node otherwise any buffered messages may be lost.
        ///
        /// [`OnionMessage`]: msgs::OnionMessage
-       /// [`MessageRouter`]: crate::onion_message::MessageRouter
-       /// [`Destination`]: crate::onion_message::Destination
+       /// [`MessageRouter`]: crate::onion_message::messenger::MessageRouter
+       /// [`Destination`]: crate::onion_message::messenger::Destination
        /// [`OnionMessageHandler`]: crate::ln::msgs::OnionMessageHandler
        ConnectionNeeded {
                /// The node id for the node needing a connection.
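
Handling the event might look like the following sketch; only `node_id` is shown in this hunk, any remaining fields are elided, and `connect_to_peer` is a hypothetical application-side helper:

```rust
if let Event::ConnectionNeeded { node_id, .. } = event {
    // Connect out to the peer so buffered onion messages are not lost.
    connect_to_peer(node_id);
}
```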
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 1dfc6dc552fea205a01b27ee573c2de4315e803e..b3657701a4a4b876c80694bd2496e3662a78937a 100644
@@ -814,6 +814,7 @@ pub(super) struct ReestablishResponses {
 /// The result of a shutdown that should be handled.
 #[must_use]
 pub(crate) struct ShutdownResult {
+       pub(crate) closure_reason: ClosureReason,
        /// A channel monitor update to apply.
        pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
@@ -822,7 +823,10 @@ pub(crate) struct ShutdownResult {
        /// propagated to the remainder of the batch.
        pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
        pub(crate) channel_id: ChannelId,
+       pub(crate) user_channel_id: u128,
+       pub(crate) channel_capacity_satoshis: u64,
        pub(crate) counterparty_node_id: PublicKey,
+       pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
 }
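
With these additions, a `ShutdownResult` carries everything needed to emit a `ChannelClosed` event after the channel object itself is gone; a sketch mirroring the `channelmanager.rs` hunk later in this diff:

```rust
// Assemble the closure event purely from the ShutdownResult.
let event = events::Event::ChannelClosed {
    channel_id: shutdown_res.channel_id,
    user_channel_id: shutdown_res.user_channel_id,
    reason: shutdown_res.closure_reason,
    counterparty_node_id: Some(shutdown_res.counterparty_node_id),
    channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
};
```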
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -1833,8 +1837,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// will sign and send to our counterparty.
        /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
        fn build_remote_transaction_keys(&self) -> TxCreationKeys {
-               //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
-               //may see payments to it!
                let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
@@ -2313,15 +2315,17 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                res
        }
 
-       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
-               where F: Fn() -> Option<O> {
+       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
                match self.channel_state {
                        ChannelState::FundingNegotiated => f(),
-                       ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
-                               f()
-                       } else {
-                               None
-                       },
+                       ChannelState::AwaitingChannelReady(flags) =>
+                               if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
+                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
+                               {
+                                       f()
+                               } else {
+                                       None
+                               },
                        _ => None,
                }
        }
@@ -2356,7 +2360,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
        /// Also returns the list of payment_hashes for channels which we can safely fail backwards
        /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+       pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
                // Note that we MUST only generate a monitor update that indicates force-closure - we're
                // called during initialization prior to the chain_monitor in the encompassing ChannelManager
                // being fully configured in some cases. Thus, its likely any monitor events we generate will
@@ -2397,15 +2401,20 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        } else { None }
                } else { None };
                let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
+               let unbroadcasted_funding_tx = self.unbroadcasted_funding();
 
                self.channel_state = ChannelState::ShutdownComplete;
                self.update_time_counter += 1;
                ShutdownResult {
+                       closure_reason,
                        monitor_update,
                        dropped_outbound_htlcs,
                        unbroadcasted_batch_funding_txid,
                        channel_id: self.channel_id,
+                       user_channel_id: self.user_id,
+                       channel_capacity_satoshis: self.channel_value_satoshis,
                        counterparty_node_id: self.counterparty_node_id,
+                       unbroadcasted_funding_tx,
                }
        }
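
Call sites now name the closure reason up front, and it rides along in the returned `ShutdownResult` instead of being attached when the event is generated. A minimal call-site sketch, using a reason variant that appears elsewhere in this diff:

```rust
let shutdown_res = chan.context.force_shutdown(
    /* should_broadcast */ false, ClosureReason::HolderForceClosed,
);
```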
 
@@ -2546,26 +2555,24 @@ impl FailHTLCContents for msgs::OnionErrorPacket {
                HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
        }
 }
-impl FailHTLCContents for (u16, [u8; 32]) {
-       type Message = msgs::UpdateFailMalformedHTLC; // (failure_code, sha256_of_onion)
+impl FailHTLCContents for ([u8; 32], u16) {
+       type Message = msgs::UpdateFailMalformedHTLC;
        fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
                msgs::UpdateFailMalformedHTLC {
                        htlc_id,
                        channel_id,
-                       failure_code: self.0,
-                       sha256_of_onion: self.1
+                       sha256_of_onion: self.0,
+                       failure_code: self.1
                }
        }
        fn to_inbound_htlc_state(self) -> InboundHTLCState {
-               InboundHTLCState::LocalRemoved(
-                       InboundHTLCRemovalReason::FailMalformed((self.1, self.0))
-               )
+               InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
        }
        fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
                HTLCUpdateAwaitingACK::FailMalformedHTLC {
                        htlc_id,
-                       failure_code: self.0,
-                       sha256_of_onion: self.1
+                       sha256_of_onion: self.0,
+                       failure_code: self.1
                }
        }
 }
@@ -2892,7 +2899,7 @@ impl<SP: Deref> Channel<SP> where
        pub fn queue_fail_malformed_htlc<L: Deref>(
                &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
        ) -> Result<(), ChannelError> where L::Target: Logger {
-               self.fail_htlc(htlc_id_arg, (failure_code, sha256_of_onion), true, logger)
+               self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
        }
 
@@ -2905,7 +2912,7 @@ impl<SP: Deref> Channel<SP> where
        /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
        /// [`ChannelError::Ignore`].
        fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
-               &mut self, htlc_id_arg: u64, err_packet: E, mut force_holding_cell: bool,
+               &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
                logger: &L
        ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
@@ -2972,7 +2979,7 @@ impl<SP: Deref> Channel<SP> where
                                }
                        }
                        log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
-                       self.context.holding_cell_htlc_updates.push(err_packet.to_htlc_update_awaiting_ack(htlc_id_arg));
+                       self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
                        return Ok(None);
                }
 
@@ -2980,10 +2987,10 @@ impl<SP: Deref> Channel<SP> where
                        E::Message::name(), &self.context.channel_id());
                {
                        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-                       htlc.state = err_packet.clone().to_inbound_htlc_state();
+                       htlc.state = err_contents.clone().to_inbound_htlc_state();
                }
 
-               Ok(Some(err_packet.to_message(htlc_id_arg, self.context.channel_id())))
+               Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
        }
 
        // Message handlers:
@@ -3596,7 +3603,7 @@ impl<SP: Deref> Channel<SP> where
                                // the limit. In case it's less rare than I anticipate, we may want to revisit
                                // handling this case better and maybe fulfilling some of the HTLCs while attempting
                                // to rebalance channels.
-                               match &htlc_update {
+                               let fail_htlc_res = match &htlc_update {
                                        &HTLCUpdateAwaitingACK::AddHTLC {
                                                amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
                                                skimmed_fee_msat, blinding_point, ..
@@ -3624,6 +3631,7 @@ impl<SP: Deref> Channel<SP> where
                                                                }
                                                        }
                                                }
+                                               None
                                        },
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
                                                // If an HTLC claim was previously added to the holding cell (via
@@ -3637,40 +3645,33 @@ impl<SP: Deref> Channel<SP> where
                                                        { monitor_update } else { unreachable!() };
                                                update_fulfill_count += 1;
                                                monitor_update.updates.append(&mut additional_monitor_update.updates);
+                                               None
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
-                                               match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
-                                                       Ok(update_fail_msg_option) => {
-                                                               // If an HTLC failure was previously added to the holding cell (via
-                                                               // `queue_fail_htlc`) then generating the fail message itself must
-                                                               // not fail - we should never end up in a state where we double-fail
-                                                               // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
-                                                               // for a full revocation before failing.
-                                                               debug_assert!(update_fail_msg_option.is_some());
-                                                               update_fail_count += 1;
-                                                       },
-                                                       Err(e) => {
-                                                               if let ChannelError::Ignore(_) = e {}
-                                                               else {
-                                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-                                                               }
-                                                       }
-                                               }
+                                               Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
+                                                .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
                                        },
                                        &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-                                               match self.fail_htlc(htlc_id, (failure_code, sha256_of_onion), false, logger) {
-                                                       Ok(update_fail_malformed_opt) => {
-                                                               debug_assert!(update_fail_malformed_opt.is_some()); // See above comment
-                                                               update_fail_count += 1;
-                                                       },
-                                                       Err(e) => {
-                                                               if let ChannelError::Ignore(_) = e {}
-                                                               else {
-                                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-                                                               }
-                                                       }
-                                               }
-                                       },
+                                               Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
+                                                .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
+                                       }
+                               };
+                               if let Some(res) = fail_htlc_res {
+                                       match res {
+                                               Ok(fail_msg_opt) => {
+                                                       // If an HTLC failure was previously added to the holding cell (via
+                                                       // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
+                                                       // not fail - we should never end up in a state where we double-fail
+                                                       // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+                                                       // for a full revocation before failing.
+                                                       debug_assert!(fail_msg_opt.is_some());
+                                                       update_fail_count += 1;
+                                               },
+                                               Err(ChannelError::Ignore(_)) => {},
+                                               Err(_) => {
+                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+                                               },
+                                       }
                                }
                        }
                        if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
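
The shape of this refactor, as a self-contained sketch: each match arm yields an `Option<Result<..>>`, only the two fail paths return `Some`, and the previously duplicated panic/ignore handling runs once after the match (all names here are hypothetical):

```rust
enum HoldingCellUpdate { Add, Fail(u64), FailMalformed(u64) }
struct IgnoreErr;

fn try_fail(_htlc_id: u64) -> Result<Option<()>, IgnoreErr> { Ok(Some(())) }

fn process(update: HoldingCellUpdate, update_fail_count: &mut usize) {
    let fail_htlc_res = match update {
        HoldingCellUpdate::Add => None, // adds are handled inline, as above
        HoldingCellUpdate::Fail(id) | HoldingCellUpdate::FailMalformed(id) =>
            Some(try_fail(id)),
    };
    // The shared error handling runs exactly once, for either fail variant.
    if let Some(res) = fail_htlc_res {
        match res {
            Ok(fail_msg_opt) => {
                debug_assert!(fail_msg_opt.is_some());
                *update_fail_count += 1;
            },
            Err(IgnoreErr) => {},
        }
    }
}
```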
@@ -4933,11 +4934,15 @@ impl<SP: Deref> Channel<SP> where
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
                                let shutdown_result = ShutdownResult {
+                                       closure_reason: ClosureReason::CooperativeClosure,
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                        channel_id: self.context.channel_id,
+                                       user_channel_id: self.context.user_id,
+                                       channel_capacity_satoshis: self.context.channel_value_satoshis,
                                        counterparty_node_id: self.context.counterparty_node_id,
+                                       unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
                                self.context.channel_state = ChannelState::ShutdownComplete;
@@ -4963,11 +4968,15 @@ impl<SP: Deref> Channel<SP> where
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
+                                                               closure_reason: ClosureReason::CooperativeClosure,
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                                                channel_id: self.context.channel_id,
+                                                               user_channel_id: self.context.user_id,
+                                                               channel_capacity_satoshis: self.context.channel_value_satoshis,
                                                                counterparty_node_id: self.context.counterparty_node_id,
+                                                               unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
                                                        };
                                                        self.context.channel_state = ChannelState::ShutdownComplete;
                                                        self.context.update_time_counter += 1;
diff --git a/lightning/src/ln/channel_keys.rs b/lightning/src/ln/channel_keys.rs
index f737dd23407727786d669b5ca1cbc50776cc1251..b577dc60008583537c4d9c1cebc8b2f6e5f48e77 100644
@@ -25,126 +25,125 @@ use bitcoin::secp256k1::PublicKey;
 use bitcoin::hashes::sha256::Hash as Sha256;
 
 macro_rules! doc_comment {
-    ($x:expr, $($tt:tt)*) => {
-        #[doc = $x]
-        $($tt)*
-    };
+       ($x:expr, $($tt:tt)*) => {
+               #[doc = $x]
+               $($tt)*
+       };
 }
 macro_rules! basepoint_impl {
-    ($BasepointT:ty) => {
-        impl $BasepointT {
-            /// Get inner Public Key
-            pub fn to_public_key(&self) -> PublicKey {
-                self.0
-            }
-        }
-        
-        impl From<PublicKey> for $BasepointT {
-            fn from(value: PublicKey) -> Self {
-                Self(value)
-            }
-        }
-        
-    }
+       ($BasepointT:ty) => {
+               impl $BasepointT {
+                       /// Get inner Public Key
+                       pub fn to_public_key(&self) -> PublicKey {
+                               self.0
+                       }
+               }
+
+               impl From<PublicKey> for $BasepointT {
+                       fn from(value: PublicKey) -> Self {
+                               Self(value)
+                       }
+               }
+
+       }
 }
 macro_rules! key_impl {
-    ($BasepointT:ty, $KeyName:expr) => {
-        doc_comment! {
-            concat!("Generate ", $KeyName, " using per_commitment_point"),
-            pub fn from_basepoint<T: secp256k1::Signing>(
-                secp_ctx: &Secp256k1<T>,
-                basepoint: &$BasepointT,
-                per_commitment_point: &PublicKey,
-            ) -> Self {
-                Self(derive_public_key(secp_ctx, per_commitment_point, &basepoint.0))
-            }
-        }
-        
-        doc_comment! {
-            concat!("Generate ", $KeyName, " from privkey"),
-            pub fn from_secret_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, sk: &SecretKey) -> Self {
-                Self(PublicKey::from_secret_key(&secp_ctx, &sk))
-            }
-        }
-        
-        /// Get inner Public Key
-        pub fn to_public_key(&self) -> PublicKey {
-            self.0
-        }
-    }
+       ($BasepointT:ty, $KeyName:expr) => {
+               doc_comment! {
+                       concat!("Derive a public ", $KeyName, " using one node's `per_commitment_point` and its countersignatory's `basepoint`"),
+                       pub fn from_basepoint<T: secp256k1::Signing>(
+                               secp_ctx: &Secp256k1<T>,
+                               countersignatory_basepoint: &$BasepointT,
+                               per_commitment_point: &PublicKey,
+                       ) -> Self {
+                               Self(derive_public_key(secp_ctx, per_commitment_point, &countersignatory_basepoint.0))
+                       }
+               }
+
+               doc_comment! {
+                       concat!("Build a ", $KeyName, " directly from an already-derived private key"),
+                       pub fn from_secret_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, sk: &SecretKey) -> Self {
+                               Self(PublicKey::from_secret_key(&secp_ctx, &sk))
+                       }
+               }
+
+               /// Get inner Public Key
+               pub fn to_public_key(&self) -> PublicKey {
+                       self.0
+               }
+       }
 }
 macro_rules! key_read_write {
-    ($SelfT:ty) => {
-        impl Writeable for $SelfT {
-            fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
-                self.0.serialize().write(w)
-            }
-        }
-        
-        impl Readable for $SelfT {
-            fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
-                let key: PublicKey = Readable::read(r)?;
-                Ok(Self(key))
-            }
-        }
-    }
+       ($SelfT:ty) => {
+               impl Writeable for $SelfT {
+                       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+                               self.0.serialize().write(w)
+                       }
+               }
+
+               impl Readable for $SelfT {
+                       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+                               let key: PublicKey = Readable::read(r)?;
+                               Ok(Self(key))
+                       }
+               }
+       }
 }
 
 
 
-/// Master key used in conjunction with per_commitment_point to generate [`local_delayedpubkey`](https://github.com/lightning/bolts/blob/master/03-transactions.md#key-derivation) for the latest state of a channel.
-/// A watcher can be given a [DelayedPaymentBasepoint] to generate per commitment [DelayedPaymentKey] to create justice transactions.
+/// Base key used in conjunction with a `per_commitment_point` to generate a [`DelayedPaymentKey`].
+///
+/// The delayed payment key is used to pay the commitment state broadcaster their
+/// non-HTLC-encumbered funds after a delay to give their counterparty a chance to punish if the
+/// state broadcasted was previously revoked.
 #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
 pub struct DelayedPaymentBasepoint(pub PublicKey);
 basepoint_impl!(DelayedPaymentBasepoint);
 key_read_write!(DelayedPaymentBasepoint);
 
-/// [delayedpubkey](https://github.com/lightning/bolts/blob/master/03-transactions.md#localpubkey-local_htlcpubkey-remote_htlcpubkey-local_delayedpubkey-and-remote_delayedpubkey-derivation)
-/// To allow a counterparty to contest a channel state published by a node, Lightning protocol sets delays for some of the outputs, before can be spend.
-/// For example a commitment transaction has to_local output encumbered by a delay, negotiated at the channel establishment flow.
-/// To spend from such output a node has to generate a script using, among others, a local delayed payment key.
+
+/// A derived key built from a [`DelayedPaymentBasepoint`] and `per_commitment_point`.
+///
+/// The delayed payment key is used to pay the commitment state broadcaster their
+/// non-HTLC-encumbered funds after a delay. This delay gives their counterparty a chance to
+/// punish and claim all the channel funds if the state broadcasted was previously revoked.
+///
+/// [See the BOLT specs]
+/// (https://github.com/lightning/bolts/blob/master/03-transactions.md#localpubkey-local_htlcpubkey-remote_htlcpubkey-local_delayedpubkey-and-remote_delayedpubkey-derivation)
+/// for more information on key derivation details.
 #[derive(PartialEq, Eq, Clone, Copy, Debug)]
 pub struct DelayedPaymentKey(pub PublicKey);
 
 impl DelayedPaymentKey {
-    key_impl!(DelayedPaymentBasepoint, "delayedpubkey");
+       key_impl!(DelayedPaymentBasepoint, "delayedpubkey");
 }
 key_read_write!(DelayedPaymentKey);
 
-/// Master key used in conjunction with per_commitment_point to generate a [localpubkey](https://github.com/lightning/bolts/blob/master/03-transactions.md#key-derivation) for the latest state of a channel.
-/// Also used to generate a commitment number in a commitment transaction or as a Payment Key for a remote node (not us) in an anchor output if `option_static_remotekey` is enabled.
-/// Shared by both nodes in a channel establishment message flow.
-#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
-pub struct PaymentBasepoint(pub PublicKey);
-basepoint_impl!(PaymentBasepoint);
-key_read_write!(PaymentBasepoint);
-
-
-/// [localpubkey](https://github.com/lightning/bolts/blob/master/03-transactions.md#localpubkey-local_htlcpubkey-remote_htlcpubkey-local_delayedpubkey-and-remote_delayedpubkey-derivation) is a child key of a payment basepoint,
-/// that enables a secure hash-lock for off-chain payments without risk of funds getting stuck or stolen. A payment key is normally shared with a counterparty so that it can generate 
-/// a commitment transaction's to_remote ouput, which our node can claim in case the counterparty force closes the channel.
-#[derive(PartialEq, Eq, Clone, Copy, Debug)]
-pub struct PaymentKey(pub PublicKey);
-
-impl PaymentKey {
-    key_impl!(PaymentBasepoint, "localpubkey");
-}
-key_read_write!(PaymentKey);
-
-/// Master key used in conjunction with per_commitment_point to generate [htlcpubkey](https://github.com/lightning/bolts/blob/master/03-transactions.md#key-derivation) for the latest state of a channel.
+/// Base key used in conjunction with a `per_commitment_point` to generate an [`HtlcKey`].
+///
+/// HTLC keys are used to ensure only the recipient of an HTLC can claim it on-chain with the HTLC
+/// preimage and that only the sender of an HTLC can claim it on-chain after it has timed out.
+/// Thus, both channel counterparties' HTLC keys will appear in each HTLC output's script.
 #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
 pub struct HtlcBasepoint(pub PublicKey);
 basepoint_impl!(HtlcBasepoint);
 key_read_write!(HtlcBasepoint);
 
-
-/// [htlcpubkey](https://github.com/lightning/bolts/blob/master/03-transactions.md#localpubkey-local_htlcpubkey-remote_htlcpubkey-local_delayedpubkey-and-remote_delayedpubkey-derivation) is a child key of an htlc basepoint,
-/// that enables secure routing of payments in onion scheme without a risk of them getting stuck or diverted. It is used to claim the funds in successful or timed out htlc outputs.
+/// A derived key built from a [`HtlcBasepoint`] and `per_commitment_point`.
+///
+/// HTLC keys are used to ensure only the recipient of an HTLC can claim it on-chain with the HTLC
+/// preimage and that only the sender of an HTLC can claim it on-chain after it has timed out.
+/// Thus, both channel counterparties' HTLC keys will appear in each HTLC output's script.
+///
+/// [See the BOLT specs]
+/// (https://github.com/lightning/bolts/blob/master/03-transactions.md#localpubkey-local_htlcpubkey-remote_htlcpubkey-local_delayedpubkey-and-remote_delayedpubkey-derivation)
+/// for more information on key derivation details.
 #[derive(PartialEq, Eq, Clone, Copy, Debug)]
 pub struct HtlcKey(pub PublicKey);
 
 impl HtlcKey {
-    key_impl!(HtlcBasepoint, "htlcpubkey");
+       key_impl!(HtlcBasepoint, "htlcpubkey");
 }
 key_read_write!(HtlcKey);
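
A usage sketch for the derivation entry points that `key_impl!` generates; the basepoints and `per_commitment_point` are assumed to come from the channel's negotiated keys:

```rust
// Derive the per-commitment delayed-payment and HTLC keys from their basepoints.
let delayed_key = DelayedPaymentKey::from_basepoint(
    &secp_ctx, &delayed_payment_basepoint, &per_commitment_point,
);
let htlc_key = HtlcKey::from_basepoint(&secp_ctx, &htlc_basepoint, &per_commitment_point);
```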
 
@@ -156,7 +155,6 @@ fn derive_public_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, per_commitm
        sha.input(&per_commitment_point.serialize());
        sha.input(&base_point.serialize());
        let res = Sha256::from_engine(sha).to_byte_array();
-    
 
        let hashkey = PublicKey::from_secret_key(&secp_ctx,
                &SecretKey::from_slice(&res).expect("Hashes should always be valid keys unless SHA-256 is broken"));
@@ -172,68 +170,71 @@ basepoint_impl!(RevocationBasepoint);
 key_read_write!(RevocationBasepoint);
 
 
-/// [htlcpubkey](https://github.com/lightning/bolts/blob/master/03-transactions.md#localpubkey-local_htlcpubkey-remote_htlcpubkey-local_delayedpubkey-and-remote_delayedpubkey-derivation) is a child key of a revocation basepoint,
-/// that enables a node to create a justice transaction punishing a counterparty for an attempt to steal funds. Used to in generation of commitment and htlc outputs.
+/// The revocation key is used to allow a channel party to revoke their state - giving their
+/// counterparty the required material to claim all of their funds if they broadcast that state.
+///
+/// Each commitment transaction has a revocation key based on the basepoint and
+/// per_commitment_point which is used in both commitment and HTLC transactions.
+///
+/// See [the BOLT spec for derivation details]
+/// (https://github.com/lightning/bolts/blob/master/03-transactions.md#revocationpubkey-derivation)
 #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
 pub struct RevocationKey(pub PublicKey);
 
 impl RevocationKey {
-    /// Derives a per-commitment-transaction revocation public key from its constituent parts. This is
-    /// the public equivalend of derive_private_revocation_key - using only public keys to derive a
-    /// public key instead of private keys.
-    ///
-    /// Only the cheating participant owns a valid witness to propagate a revoked
-    /// commitment transaction, thus per_commitment_point always come from cheater
-    /// and revocation_base_point always come from punisher, which is the broadcaster
-    /// of the transaction spending with this key knowledge.
-    ///
-    /// Note that this is infallible iff we trust that at least one of the two input keys are randomly
-    /// generated (ie our own).
-    pub fn from_basepoint<T: secp256k1::Verification>(
-        secp_ctx: &Secp256k1<T>,
-        basepoint: &RevocationBasepoint,
-        per_commitment_point: &PublicKey,
-    ) -> Self {
-        let rev_append_commit_hash_key = {
-            let mut sha = Sha256::engine();
-            sha.input(&basepoint.to_public_key().serialize());
-            sha.input(&per_commitment_point.serialize());
-    
-            Sha256::from_engine(sha).to_byte_array()
-        };
-        let commit_append_rev_hash_key = {
-            let mut sha = Sha256::engine();
-            sha.input(&per_commitment_point.serialize());
-            sha.input(&basepoint.to_public_key().serialize());
-    
-            Sha256::from_engine(sha).to_byte_array()
-        };
-    
-        let countersignatory_contrib = basepoint.to_public_key().mul_tweak(&secp_ctx, &Scalar::from_be_bytes(rev_append_commit_hash_key).unwrap())
-            .expect("Multiplying a valid public key by a hash is expected to never fail per secp256k1 docs");
-        let broadcaster_contrib = (&per_commitment_point).mul_tweak(&secp_ctx, &Scalar::from_be_bytes(commit_append_rev_hash_key).unwrap())
-            .expect("Multiplying a valid public key by a hash is expected to never fail per secp256k1 docs");
-        let pk = countersignatory_contrib.combine(&broadcaster_contrib)
-            .expect("Addition only fails if the tweak is the inverse of the key. This is not possible when the tweak commits to the key.");
-        Self(pk)
-    }
-
-    /// Get inner Public Key
-    pub fn to_public_key(&self) -> PublicKey {
-        self.0
-    }
+       /// Derives a per-commitment-transaction revocation public key from one party's per-commitment
+       /// point and the other party's [`RevocationBasepoint`]. This is the public equivalent of
+       /// [`chan_utils::derive_private_revocation_key`] - using only public keys to derive a public
+       /// key instead of private keys.
+       ///
+       /// Note that this is infallible iff we trust that at least one of the two input keys are randomly
+       /// generated (ie our own).
+       ///
+       /// [`chan_utils::derive_private_revocation_key`]: crate::ln::chan_utils::derive_private_revocation_key
+       pub fn from_basepoint<T: secp256k1::Verification>(
+               secp_ctx: &Secp256k1<T>,
+               countersignatory_basepoint: &RevocationBasepoint,
+               per_commitment_point: &PublicKey,
+       ) -> Self {
+               let rev_append_commit_hash_key = {
+                       let mut sha = Sha256::engine();
+                       sha.input(&countersignatory_basepoint.to_public_key().serialize());
+                       sha.input(&per_commitment_point.serialize());
+
+                       Sha256::from_engine(sha).to_byte_array()
+               };
+               let commit_append_rev_hash_key = {
+                       let mut sha = Sha256::engine();
+                       sha.input(&per_commitment_point.serialize());
+                       sha.input(&countersignatory_basepoint.to_public_key().serialize());
+
+                       Sha256::from_engine(sha).to_byte_array()
+               };
+
+               let countersignatory_contrib = countersignatory_basepoint.to_public_key().mul_tweak(&secp_ctx, &Scalar::from_be_bytes(rev_append_commit_hash_key).unwrap())
+                       .expect("Multiplying a valid public key by a hash is expected to never fail per secp256k1 docs");
+               let broadcaster_contrib = (&per_commitment_point).mul_tweak(&secp_ctx, &Scalar::from_be_bytes(commit_append_rev_hash_key).unwrap())
+                       .expect("Multiplying a valid public key by a hash is expected to never fail per secp256k1 docs");
+               let pk = countersignatory_contrib.combine(&broadcaster_contrib)
+                       .expect("Addition only fails if the tweak is the inverse of the key. This is not possible when the tweak commits to the key.");
+               Self(pk)
+       }
+
+       /// Get inner Public Key
+       pub fn to_public_key(&self) -> PublicKey {
+               self.0
+       }
 }
 key_read_write!(RevocationKey);
 
 
-
 #[cfg(test)]
 mod test {
-    use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
-    use bitcoin::hashes::hex::FromHex;
-    use super::derive_public_key;
+       use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
+       use bitcoin::hashes::hex::FromHex;
+       use super::derive_public_key;
 
-    #[test]
+       #[test]
        fn test_key_derivation() {
                // Test vectors from BOLT 3 Appendix E:
                let secp_ctx = Secp256k1::new();
@@ -248,6 +249,6 @@ mod test {
                assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
 
                assert_eq!(derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
-                               <Vec<u8>>::from_hex("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
+                       <Vec<u8>>::from_hex("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
        }
 }
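
A corresponding sketch for `RevocationKey`: only public material is needed, namely the punisher's `RevocationBasepoint` and the revoked state's `per_commitment_point` (variable names assumed):

```rust
let revocation_key = RevocationKey::from_basepoint(
    &secp_ctx, &countersignatory_revocation_basepoint, &per_commitment_point,
);
```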
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 80d129bbe6fc045bc5300a973359be64e0cc4eb6..db2e160430313b1dd3a80e12b1267ba8006bafd3 100644
@@ -63,7 +63,8 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
 use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
@@ -550,9 +551,8 @@ impl Into<u16> for FailureCode {
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
-       chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
+       closes_channel: bool,
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
-       channel_capacity: Option<u64>,
 }
 impl MsgHandleErrInternal {
        #[inline]
@@ -567,17 +567,16 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
-                       chan_id: None,
+                       closes_channel: false,
                        shutdown_finish: None,
-                       channel_capacity: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
-               Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
+               Self { err, closes_channel: false, shutdown_finish: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
                let action = if shutdown_res.monitor_update.is_some() {
                        // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
@@ -589,9 +588,8 @@ impl MsgHandleErrInternal {
                };
                Self {
                        err: LightningError { err, action },
-                       chan_id: Some((channel_id, user_channel_id)),
+                       closes_channel: true,
                        shutdown_finish: Some((shutdown_res, channel_update)),
-                       channel_capacity: Some(channel_capacity)
                }
        }
        #[inline]
@@ -622,14 +620,13 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
-                       chan_id: None,
+                       closes_channel: false,
                        shutdown_finish: None,
-                       channel_capacity: None,
                }
        }
 
        fn closes_channel(&self) -> bool {
-               self.chan_id.is_some()
+               self.closes_channel
        }
 }
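
The slimmed-down constructor in use, matching the call sites later in this diff (`channel_update_opt` stands in for the optional `msgs::ChannelUpdate`):

```rust
// Closure details (reason, user_channel_id, capacity, ...) now travel inside
// shutdown_res itself rather than as extra arguments.
let err = MsgHandleErrInternal::from_finish_shutdown(
    msg, channel_id, shutdown_res, channel_update_opt,
);
```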
 
@@ -1961,30 +1958,27 @@ macro_rules! handle_error {
 
                match $internal {
                        Ok(msg) => Ok(msg),
-                       Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
+                       Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
                                let mut msg_events = Vec::with_capacity(2);
 
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
+                                       let counterparty_node_id = shutdown_res.counterparty_node_id;
+                                       let channel_id = shutdown_res.channel_id;
+                                       let logger = WithContext::from(
+                                               &$self.logger, Some(counterparty_node_id), Some(channel_id),
+                                       );
+                                       log_error!(logger, "Force-closing channel: {}", err.err);
+
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
                                                msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
-                                       if let Some((channel_id, user_channel_id)) = chan_id {
-                                               $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
-                                                       channel_id, user_channel_id,
-                                                       reason: ClosureReason::ProcessingError { err: err.err.clone() },
-                                                       counterparty_node_id: Some($counterparty_node_id),
-                                                       channel_capacity_sats: channel_capacity,
-                                               }, None));
-                                       }
+                               } else {
+                                       log_error!($self.logger, "Got non-closing error: {}", err.err);
                                }
 
-                               let logger = WithContext::from(
-                                       &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
-                               );
-                               log_error!(logger, "{}", err.err);
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
                                        msg_events.push(events::MessageSendEvent::HandleError {
@@ -2044,12 +2038,11 @@ macro_rules! convert_chan_phase_err {
                                let logger = WithChannelContext::from(&$self.logger, &$channel.context);
                                log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
-                               let shutdown_res = $channel.context.force_shutdown(true);
-                               let user_id = $channel.context.get_user_id();
-                               let channel_capacity_satoshis = $channel.context.get_value_satoshis();
-
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
-                                       shutdown_res, $channel_update, channel_capacity_satoshis))
+                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
+                               let shutdown_res = $channel.context.force_shutdown(true, reason);
+                               let err =
+                                       MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
+                               (true, err)
                        },
                }
        };
@@ -2706,26 +2699,6 @@ where
                        .collect()
        }
 
-       /// Helper function that issues the channel close events
-       fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
-               let mut pending_events_lock = self.pending_events.lock().unwrap();
-               match context.unbroadcasted_funding() {
-                       Some(transaction) => {
-                               pending_events_lock.push_back((events::Event::DiscardFunding {
-                                       channel_id: context.channel_id(), transaction
-                               }, None));
-                       },
-                       None => {},
-               }
-               pending_events_lock.push_back((events::Event::ChannelClosed {
-                       channel_id: context.channel_id(),
-                       user_channel_id: context.get_user_id(),
-                       reason: closure_reason,
-                       counterparty_node_id: Some(context.get_counterparty_node_id()),
-                       channel_capacity_sats: Some(context.get_value_satoshis()),
-               }, None));
-       }
-
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
@@ -2767,9 +2740,8 @@ where
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                        } else {
-                                               self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
                                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
-                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
@@ -2866,7 +2838,9 @@ where
                let logger = WithContext::from(
                        &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
                );
-               log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+
+               log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
+                       shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
@@ -2891,8 +2865,7 @@ where
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
                                                update_maps_on_chan_removal!(self, &chan.context());
-                                               self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
-                                               shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                               shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
                                        }
                                }
                                has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
@@ -2902,6 +2875,23 @@ where
                                "Closing a batch where all channels have completed initial monitor update",
                        );
                }
+
+               {
+                       let mut pending_events = self.pending_events.lock().unwrap();
+                       pending_events.push_back((events::Event::ChannelClosed {
+                               channel_id: shutdown_res.channel_id,
+                               user_channel_id: shutdown_res.user_channel_id,
+                               reason: shutdown_res.closure_reason,
+                               counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+                               channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+                       }, None));
+
+                       if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
+                               pending_events.push_back((events::Event::DiscardFunding {
+                                       channel_id: shutdown_res.channel_id, transaction
+                               }, None));
+                       }
+               }
                for shutdown_result in shutdown_results.drain(..) {
                        self.finish_close_channel(shutdown_result);
                }
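
Since both events are now queued from `finish_close_channel`, an application's event loop observes the closure and any discarded funding transaction together. A hedged consumer sketch, assuming the `Display` impls these types provide in recent LDK:

```rust
match event {
    Event::ChannelClosed { channel_id, reason, .. } => {
        // `reason` is the ClosureReason recorded in the ShutdownResult.
        println!("channel {} closed: {}", channel_id, reason);
    },
    Event::DiscardFunding { channel_id, transaction } => {
        // This funding transaction was never broadcast and can be forgotten.
        println!("discarding funding tx {} for {}", transaction.txid(), channel_id);
    },
    _ => {},
}
```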
@@ -2924,17 +2914,16 @@ where
                        let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
                                log_error!(logger, "Force-closing channel {}", channel_id);
-                               self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
                                mem::drop(peer_state);
                                mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
-                                               self.finish_close_channel(chan.context.force_shutdown(broadcast));
+                                               self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
                                                (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
                                        },
                                        ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
-                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
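
Editor's note: this hunk, and many below, collapse the old two-step close — `issue_channel_close_events(&ctx, reason)` followed by `force_shutdown(broadcast)` — into a single `force_shutdown(broadcast, reason)` call, so the `ClosureReason` rides inside the returned `ShutdownResult`. A toy sketch of the signature change (types simplified; not LDK's actual structs):

    #[derive(Debug, Clone)]
    enum ClosureReason {
        HolderForceClosed,
        ProcessingError { err: String },
    }

    struct ShutdownResult {
        closure_reason: ClosureReason,
        broadcast_latest_txn: bool,
    }

    struct ChannelContext;

    impl ChannelContext {
        // New shape: the reason is threaded through, instead of being emitted
        // out-of-band by a separate issue_channel_close_events() call.
        fn force_shutdown(&mut self, broadcast: bool, reason: ClosureReason) -> ShutdownResult {
            ShutdownResult { closure_reason: reason, broadcast_latest_txn: broadcast }
        }
    }

    fn main() {
        let mut ctx = ChannelContext;
        let res = ctx.force_shutdown(true, ClosureReason::HolderForceClosed);
        assert!(res.broadcast_latest_txn);
        println!("closed: {:?}", res.closure_reason);
    }
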
@@ -3755,7 +3744,7 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let funding_txo;
-               let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+               let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
                        Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
                                funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
@@ -3763,10 +3752,9 @@ where
                                let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
-                                               let user_id = chan.context.get_user_id();
-                                               let shutdown_res = chan.context.force_shutdown(false);
-                                               let channel_capacity = chan.context.get_value_satoshis();
-                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
+                                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
+                                               let shutdown_res = chan.context.force_shutdown(false, reason);
+                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (chan, funding_msg),
@@ -3806,8 +3794,20 @@ where
                        },
                        hash_map::Entry::Vacant(e) => {
                                let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-                               if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
-                                       panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
+                               match outpoint_to_peer.entry(funding_txo) {
+                                       hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
+                                       hash_map::Entry::Occupied(o) => {
+                                               let err = format!(
+                                                       "An existing channel using outpoint {} is open with peer {}",
+                                                       funding_txo, o.get()
+                                               );
+                                               mem::drop(outpoint_to_peer);
+                                               mem::drop(peer_state_lock);
+                                               mem::drop(per_peer_state);
+                                               let reason = ClosureReason::ProcessingError { err: err.clone() };
+                                               self.finish_close_channel(chan.context.force_shutdown(true, reason));
+                                               return Err(APIError::ChannelUnavailable { err });
+                                       }
                                }
                                e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
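
Editor's note: instead of `insert` followed by a `panic!` on a duplicate funding outpoint, the new code probes `outpoint_to_peer` with the entry API and turns a collision into a clean channel close plus `APIError::ChannelUnavailable`. The core map pattern, runnable against `std::collections::HashMap` (outpoint and peer types simplified to strings):

    use std::collections::hash_map::{Entry, HashMap};

    fn register_funding(
        outpoint_to_peer: &mut HashMap<String, String>,
        funding_txo: String,
        peer: String,
    ) -> Result<(), String> {
        match outpoint_to_peer.entry(funding_txo) {
            Entry::Vacant(e) => { e.insert(peer); Ok(()) },
            // A second channel claiming the same outpoint is now an error the
            // caller can surface to the user, not a panic.
            Entry::Occupied(o) => Err(format!(
                "An existing channel using outpoint {} is open with peer {}",
                o.key(), o.get()
            )),
        }
    }

    fn main() {
        let mut map = HashMap::new();
        assert!(register_funding(&mut map, "txid:0".into(), "peer_a".into()).is_ok());
        let err = register_funding(&mut map, "txid:0".into(), "peer_b".into()).unwrap_err();
        assert!(err.contains("is open with peer"));
    }
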
@@ -3971,8 +3971,8 @@ where
                                                .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
                                                .map(|mut chan| {
                                                        update_maps_on_chan_removal!(self, &chan.context());
-                                                       self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
-                                                       shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                                       let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+                                                       shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
                                                });
                                }
                        }
@@ -4351,7 +4351,7 @@ where
                                        if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
                                                let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                for forward_info in pending_forwards.drain(..) {
-                                                       match forward_info {
+                                                       let queue_fail_htlc_res = match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
                                                                        forward_info: PendingHTLCInfo {
@@ -4397,40 +4397,35 @@ where
                                                                                ));
                                                                                continue;
                                                                        }
+                                                                       None
                                                                },
                                                                HTLCForwardInfo::AddHTLC { .. } => {
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
                                                                        log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       if let Err(e) = chan.queue_fail_htlc(
-                                                                               htlc_id, err_packet, &&logger
-                                                                       ) {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                               } else {
-                                                                                       panic!("Stated return value requirements in queue_fail_htlc() were not met");
-                                                                               }
-                                                                               // fail-backs are best-effort, we probably already have one
-                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                               continue;
-                                                                       }
+                                                                       Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
                                                                },
                                                                HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-                                                                       log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                               } else {
-                                                                                       panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
-                                                                               }
-                                                                               // fail-backs are best-effort, we probably already have one
-                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                               continue;
-                                                                       }
+                                                                       log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       let res = chan.queue_fail_malformed_htlc(
+                                                                               htlc_id, failure_code, sha256_of_onion, &&logger
+                                                                       );
+                                                                       Some((res, htlc_id))
                                                                },
+                                                       };
+                                                       if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+                                                               if let Err(e) = queue_fail_htlc_res {
+                                                                       if let ChannelError::Ignore(msg) = e {
+                                                                               log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                       } else {
+                                                                               panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+                                                                       }
+                                                                       // Fail-backs are best-effort; we probably already have one
+                                                                       // pending, and if not that's OK. If we can't queue it, the
+                                                                       // channel is on chain and sending the HTLC-Timeout is their problem.
+                                                                       continue;
+                                                               }
                                                        }
                                                }
                                        } else {
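
Editor's note: the forwarding loop previously duplicated the `ChannelError::Ignore` handling for `queue_fail_htlc` and `queue_fail_malformed_htlc`; the rewrite records each arm's result as an `Option<(Result, htlc_id)>` and handles failures once, after the match. A reduced, runnable sketch of that consolidation (error and forward types are toys):

    #[derive(Debug)]
    enum ChannelError { Ignore(String) }

    enum HtlcForward { Add, Fail { htlc_id: u64 }, FailMalformed { htlc_id: u64 } }

    fn queue_fail(_id: u64) -> Result<(), ChannelError> { Err(ChannelError::Ignore("dup".into())) }
    fn queue_fail_malformed(_id: u64) -> Result<(), ChannelError> { Ok(()) }

    fn process(forwards: Vec<HtlcForward>) {
        for fwd in forwards {
            // Each arm just records its result; the shared error path lives below.
            let queue_fail_htlc_res = match fwd {
                HtlcForward::Add => None,
                HtlcForward::Fail { htlc_id } => Some((queue_fail(htlc_id), htlc_id)),
                HtlcForward::FailMalformed { htlc_id } =>
                    Some((queue_fail_malformed(htlc_id), htlc_id)),
            };
            if let Some((res, htlc_id)) = queue_fail_htlc_res {
                if let Err(ChannelError::Ignore(msg)) = res {
                    // Fail-backs are best-effort; a pending duplicate is fine.
                    println!("could not fail HTLC {} back: {}", htlc_id, msg);
                    continue;
                }
            }
        }
    }

    fn main() {
        process(vec![HtlcForward::Fail { htlc_id: 1 }, HtlcForward::FailMalformed { htlc_id: 2 }]);
    }
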
@@ -4894,8 +4889,7 @@ where
                                        log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
-                                       self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
-                                       shutdown_channels.push(context.force_shutdown(false));
+                                       shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
@@ -6002,13 +5996,20 @@ where
        }
 
        fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
                        self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+                       .ok_or_else(|| {
+                               let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+                               log_error!(logger, "{}", err_str);
+
+                               APIError::ChannelUnavailable { err: err_str }
+                       })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let is_only_peer_channel = peer_state.total_channel_count() == 1;
@@ -6023,9 +6024,19 @@ where
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-                                       &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+                                       &self.logger, accept_0conf).map_err(|e| {
+                                               let err_str = e.to_string();
+                                               log_error!(logger, "{}", err_str);
+
+                                               APIError::ChannelUnavailable { err: err_str }
+                                       })
+                               }
+                       _ => {
+                               let err_str = "No such channel awaiting to be accepted.".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               Err(APIError::APIMisuseError { err: err_str })
                        }
-                       _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
                }?;
 
                if accept_0conf {
@@ -6039,7 +6050,10 @@ where
                                }
                        };
                        peer_state.pending_msg_events.push(send_msg_err_event);
-                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                       let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+                       log_error!(logger, "{}", err_str);
+
+                       return Err(APIError::APIMisuseError { err: err_str });
                } else {
                        // If this peer already has some channels, a new channel won't increase our number of peers
                        // with unfunded channels, so as long as we aren't over the maximum number of unfunded
@@ -6052,7 +6066,10 @@ where
                                        }
                                };
                                peer_state.pending_msg_events.push(send_msg_err_event);
-                               return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+                               let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               return Err(APIError::APIMisuseError { err: err_str });
                        }
                }
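
Editor's note: `do_accept_inbound_channel` now builds a `WithContext` logger up front and logs every error path before returning it, so API misuse shows up in the node's logs rather than only in the returned `APIError`. A sketch of the log-then-return pattern, with a plain closure standing in for LDK's `log_error!` macro (the `ApiError` enum here is a toy, not LDK's):

    #[derive(Debug)]
    enum ApiError { ChannelUnavailable { err: String }, ApiMisuseError { err: String } }

    fn accept_inbound(peer_known: bool, log: impl Fn(&str)) -> Result<(), ApiError> {
        if !peer_known {
            // Log first, then hand the same string back to the caller.
            let err_str = "Can't find a peer matching the passed counterparty node_id".to_owned();
            log(&err_str);
            return Err(ApiError::ChannelUnavailable { err: err_str });
        }
        Ok(())
    }

    fn main() {
        let res = accept_inbound(false, |m| eprintln!("ERROR: {}", m));
        assert!(matches!(res, Err(ApiError::ChannelUnavailable { .. })));
    }
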
 
@@ -6386,7 +6403,7 @@ where
                                        let res =
                                                chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
                                        match res {
-                                               Ok((chan, monitor)) => {
+                                               Ok((mut chan, monitor)) => {
                                                        if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
                                                                // We really should be able to insert here without doing a second
                                                                // lookup, but sadly rust stdlib doesn't currently allow keeping
@@ -6398,6 +6415,11 @@ where
                                                                Ok(())
                                                        } else {
                                                                let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               // We weren't able to watch the channel to begin with, so no
+                                                               // updates should be made on it. Previously, full_stack_target
+                                                               // found an (unreachable) panic when the monitor update contained
+                                                               // within `shutdown_finish` was applied.
+                                                               chan.unset_funding_info(msg.channel_id);
                                                                return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
                                                        }
                                                },
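
Editor's note: since `watch_channel` failed, the channel was never actually being monitored, so the subsequent shutdown must not try to apply a monitor update — hence the new `unset_funding_info` call before the error conversion, per the in-diff comment about the `full_stack_target` panic. A toy model of that ordering:

    struct Channel { funding: Option<u64> }

    struct Shutdown { monitor_update: Option<&'static str> }

    impl Channel {
        fn unset_funding_info(&mut self) { self.funding = None; }
        fn force_shutdown(&mut self) -> Shutdown {
            // Only a channel with funding info can (and should) emit a monitor update.
            Shutdown { monitor_update: self.funding.take().map(|_| "update") }
        }
    }

    fn main() {
        let mut chan = Channel { funding: Some(1) };
        // The watch failed, so strip the funding info before shutting down;
        // the shutdown then carries no monitor update to (wrongly) apply.
        chan.unset_funding_info();
        assert!(chan.force_shutdown().monitor_update.is_none());
    }
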
@@ -6520,9 +6542,8 @@ where
                                                let context = phase.context_mut();
                                                let logger = WithChannelContext::from(&self.logger, context);
                                                log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-                                               self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
-                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
                                }
                        } else {
@@ -6591,7 +6612,6 @@ where
                                        msg: update
                                });
                        }
-                       self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
                }
                mem::drop(per_peer_state);
                if let Some(shutdown_result) = shutdown_result {
@@ -7243,13 +7263,12 @@ where
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                                               failed_channels.push(chan.context.force_shutdown(false));
+                                                                               failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
-                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
@@ -7436,8 +7455,6 @@ where
                                                                                        });
                                                                                }
 
-                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-
                                                                                log_info!(logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
@@ -8450,14 +8467,13 @@ where
                                                                update_maps_on_chan_removal!(self, &channel.context);
                                                                // It looks like our counterparty went on-chain or the funding transaction was
                                                                // reorged out of the main chain. Close the channel.
-                                                               failed_channels.push(channel.context.force_shutdown(true));
+                                                               let reason_message = format!("{}", reason);
+                                                               failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
-                                                               let reason_message = format!("{}", reason);
-                                                               self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
                                                                        action: msgs::ErrorAction::DisconnectPeer {
@@ -8855,8 +8871,7 @@ where
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
-                                       self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
-                                       failed_channels.push(context.force_shutdown(false));
+                                       failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
@@ -10296,7 +10311,7 @@ where
                                                log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
-                                       let mut shutdown_result = channel.context.force_shutdown(true);
+                                       let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
                                        if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
@@ -10358,7 +10373,7 @@ where
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
                                // was in-progress, we never broadcasted the funding transaction and can still
                                // safely discard the channel.
-                               let _ = channel.context.force_shutdown(false);
+                               let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
                                channel_closures.push_back((events::Event::ChannelClosed {
                                        channel_id: channel.context.channel_id(),
                                        user_channel_id: channel.context.get_user_id(),
index a2d9631716c4c23098135a8f95ee37a1fc5a6949..2adfe2f9379c04c6352a709ff2471232f87d6141 100644 (file)
@@ -1218,7 +1218,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, r
        (tx, as_channel_ready.channel_id)
 }
 
-pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> Transaction {
+pub fn exchange_open_accept_chan<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> ChannelId {
        let create_chan_id = node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None).unwrap();
        let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id());
        assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
@@ -1238,6 +1238,11 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
        node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_channel_msg);
        assert_ne!(node_b.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 0);
 
+       create_chan_id
+}
+
+pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> Transaction {
+       let create_chan_id = exchange_open_accept_chan(node_a, node_b, channel_value, push_msat);
        sign_funding_transaction(node_a, node_b, channel_value, create_chan_id)
 }
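
Editor's note: splitting the old helper in two lets a test drive the open_channel/accept_channel handshake without immediately funding the channel. A usage fragment (not standalone: it assumes the standard `nodes` fixture from these test utilities, exactly as the new tests further below use it):

    // Inside a functional test, after create_network(...):
    let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
    // Both sides now know the channel, but no funding transaction exists yet,
    // so other setup can be interleaved before funding:
    let _tx = sign_funding_transaction(&nodes[0], &nodes[1], 1_000_000, temp_chan_id);
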
 
index 2ad53faa8b2e129874cbec1391156ab58aabc6e5..78207c57b45f6aa25e153d61a6dd7778c617b42f 100644 (file)
@@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        // Check to see if the funder, who sent the update_fee request, can afford the new fee
        // (funder_balance >= fee + channel_reserve). Should produce an error.
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
+       nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
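
Editor's note: `assert_log` demands an exact log line; the switch to `assert_log_contains` matches a substring and, here, expects it three times — apparently because the error string is now emitted at several call sites after this PR's logging changes. A minimal runnable model of a substring-counting assertion (LDK's actual `TestLogger` internals are not shown in this diff, so this is a stand-in):

    use std::collections::HashMap;

    struct TestLogger { lines: HashMap<String, Vec<String>> }

    impl TestLogger {
        fn assert_log_contains(&self, module: &str, pattern: &str, count: usize) {
            let found = self.lines.get(module).map_or(0, |msgs| {
                msgs.iter().filter(|m| m.contains(pattern)).count()
            });
            assert_eq!(found, count, "expected {} log lines containing {:?}", count, pattern);
        }
    }

    fn main() {
        let mut lines = HashMap::new();
        lines.insert("lightning::ln::channelmanager".to_owned(), vec![
            "Funding remote cannot afford proposed new fee: details".to_owned(),
            "Funding remote cannot afford proposed new fee".to_owned(),
            "closing: Funding remote cannot afford proposed new fee".to_owned(),
        ]);
        let logger = TestLogger { lines };
        logger.assert_log_contains("lightning::ln::channelmanager",
            "Funding remote cannot afford proposed new fee", 3);
    }
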
@@ -1617,7 +1617,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1796,7 +1796,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+       nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -3328,22 +3328,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
-       match events[0] {
-               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
-               _ => panic!("Unexepected event"),
-       }
-       match events[1] {
-               Event::PaymentPathFailed { ref payment_hash, .. } => {
-                       assert_eq!(*payment_hash, fourth_payment_hash);
-               },
-               _ => panic!("Unexpected event"),
-       }
-       match events[2] {
-               Event::PaymentFailed { ref payment_hash, .. } => {
-                       assert_eq!(*payment_hash, fourth_payment_hash);
-               },
-               _ => panic!("Unexpected event"),
-       }
+       assert!(events.iter().any(|ev| matches!(
+               ev,
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
+       )));
+       assert!(events.iter().any(|ev| matches!(
+               ev,
+               Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+       )));
+       assert!(events.iter().any(|ev| matches!(
+               ev,
+               Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+       )));
 
        nodes[1].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[1], 1);
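
Editor's note: the rewritten assertions above check for each expected event anywhere in the vector instead of at a fixed index, keeping the test robust if event ordering changes. The `iter().any` + `matches!` idiom, runnable with a toy event enum:

    #[derive(Debug)]
    enum Event {
        ChannelClosed { reason: &'static str },
        PaymentPathFailed { payment_hash: [u8; 32] },
    }

    fn main() {
        let fourth_payment_hash = [7u8; 32];
        let events = vec![
            Event::PaymentPathFailed { payment_hash: fourth_payment_hash },
            Event::ChannelClosed { reason: "CommitmentTxConfirmed" },
        ];
        // Order-insensitive: assert each expected event appears somewhere.
        assert!(events.iter().any(|ev| matches!(
            ev,
            Event::ChannelClosed { reason: "CommitmentTxConfirmed" }
        )));
        assert!(events.iter().any(|ev| matches!(
            ev,
            Event::PaymentPathFailed { payment_hash } if *payment_hash == fourth_payment_hash
        )));
    }
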
@@ -6299,7 +6295,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        updates.update_add_htlcs[0].amount_msat = 0;
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+       nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -8981,6 +8977,101 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
        }
 }
 
+#[test]
+fn test_peer_funding_sidechannel() {
+       // Test that if a peer somehow learns which txid we'll use for our channel funding before we
+       // receive `funding_transaction_generated`, the peer cannot cause us to crash. We'd previously
+       // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
+       // the txid and panicked if the peer tried to open a redundant channel to us with the same
+       // funding outpoint.
+       //
+       // While this assumption is generally safe, some users may have out-of-band protocols where
+       // they notify their LSP about a funding outpoint first, or this may be violated in the future
+       // with collaborative transaction construction protocols, i.e. dual-funding.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+       let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
+
+       let (_, tx, funding_output) =
+               create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+       let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
+       assert_eq!(cs_funding_events.len(), 1);
+       match cs_funding_events[0] {
+               Event::FundingGenerationReady { .. } => {}
+               _ => panic!("Unexpected event {:?}", cs_funding_events),
+       }
+
+       nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
+       let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
+       get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
+       expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
+       check_added_monitors!(nodes[0], 1);
+
+       let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
+       let err_msg = format!("{:?}", res.unwrap_err());
+       assert!(err_msg.contains("An existing channel using outpoint "));
+       assert!(err_msg.contains(" is open with peer"));
+       // Even though the last funding_transaction_generated errored, it still generated a
+       // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
+       // appropriate error message.
+       let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+       let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
+       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
+
+       let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+       get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+}
+
+#[test]
+fn test_duplicate_conflicting_funding_from_second_peer() {
+       // Test that if a user tries to fund a channel with a funding outpoint they'd previously used,
+       // we don't try to remove the previous ChannelMonitor. This is largely a fuzzer-regression
+       // test, as funding that gets past our outpoint-matching checks implies the user (and our
+       // counterparty) reused cryptographic keys across channels, which we require users not to do.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+
+       let (_, tx, funding_output) =
+               create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+       // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
+       // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
+       let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
+       let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
+       nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
+
+       nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+
+       let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       // At this point, the channel should be closed, after having generated one monitor write (the
+       // watch_channel call which failed), but zero monitor updates.
+       check_added_monitors!(nodes[0], 1);
+       get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+       let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
+       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
+}
+
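
Editor's note: the dummy-monitor trick in the test above works because `watch_channel` refuses a funding outpoint it already tracks — the same "Channel funding outpoint was a duplicate" failure path that the `unset_funding_info` hunk earlier guards against. A toy model of a monitor store rejecting a duplicate outpoint:

    use std::collections::HashMap;

    struct ChainMonitor { monitors: HashMap<String, &'static str> }

    impl ChainMonitor {
        fn watch_channel(&mut self, funding_outpoint: String, monitor: &'static str) -> Result<(), ()> {
            if self.monitors.contains_key(&funding_outpoint) {
                // Mirrors "Channel funding outpoint was a duplicate" in the test above.
                return Err(());
            }
            self.monitors.insert(funding_outpoint, monitor);
            Ok(())
        }
    }

    fn main() {
        let mut cm = ChainMonitor { monitors: HashMap::new() };
        // Pre-seed with a dummy monitor, as the test does, so the real watch fails.
        cm.watch_channel("funding:0".into(), "dummy").unwrap();
        assert!(cm.watch_channel("funding:0".into(), "real").is_err());
    }
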
 #[test]
 fn test_duplicate_funding_err_in_funding() {
        // Test that if we have a live channel with one peer, then another peer comes along and tries
@@ -9131,16 +9222,16 @@ fn test_duplicate_chan_id() {
                                chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
                        },
                        _ => panic!("Unexpected ChannelPhase variant"),
-               }
+               }.unwrap()
        };
        check_added_monitors!(nodes[0], 0);
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap());
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        // At this point we'll look up if the channel_id is present and immediately fail the channel
        // without trying to persist the `ChannelMonitor`.
        check_added_monitors!(nodes[1], 0);
 
        check_closed_events(&nodes[1], &[
-               ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError {
+               ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
                        err: "Already had channel with the new channel_id".to_owned()
                })
        ]);
index 2e56e2fc2c0c95d2e3cb3380d1f8b50138404ead..83f5a861559a7d443729afb52db543f2e925ed20 100644 (file)
@@ -695,7 +695,7 @@ pub struct OnionMessage {
        /// Used in decrypting the onion packet's payload.
        pub blinding_point: PublicKey,
        /// The full onion packet including hop data, pubkey, and hmac
-       pub onion_routing_packet: onion_message::Packet,
+       pub onion_routing_packet: onion_message::packet::Packet,
 }
 
 /// An [`update_fulfill_htlc`] message to be sent to or received from a peer.
@@ -2245,7 +2245,8 @@ impl Readable for OnionMessage {
                let blinding_point: PublicKey = Readable::read(r)?;
                let len: u16 = Readable::read(r)?;
                let mut packet_reader = FixedLengthReader::new(r, len as u64);
-               let onion_routing_packet: onion_message::Packet = <onion_message::Packet as LengthReadable>::read(&mut packet_reader)?;
+               let onion_routing_packet: onion_message::packet::Packet =
+                       <onion_message::packet::Packet as LengthReadable>::read(&mut packet_reader)?;
                Ok(Self {
                        blinding_point,
                        onion_routing_packet,
index 9f49b67932bc02c5972f5b7b1351a933df15eacb..6ffffec4dd0bd94febc085ed998051c7090da5eb 100644 (file)
@@ -31,8 +31,10 @@ use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, Mes
 use crate::ln::wire;
 use crate::ln::wire::{Encode, Type};
 #[cfg(not(c_bindings))]
-use crate::onion_message::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-use crate::onion_message::{CustomOnionMessageHandler, OffersMessage, OffersMessageHandler, OnionMessageContents, PendingOnionMessage};
+use crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
+use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::onion_message::packet::OnionMessageContents;
 use crate::routing::gossip::{NodeId, NodeAlias};
 use crate::util::atomic_counter::AtomicCounter;
 use crate::util::logger::{Logger, WithContext};
@@ -384,7 +386,7 @@ pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref, CustomM: Deref> where
        /// A message handler which handles onion messages. This should generally be an
        /// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
        ///
-       /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+       /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
        pub onion_message_handler: OM,
 
        /// A message handler which handles custom messages. The only LDK-provided implementation is
index b0031a6c3f4e05cd41823ceb9fa785ab3fb91722..23d27b18943c7514af3224e16904268c08df999e 100644 (file)
@@ -16,7 +16,9 @@ use crate::ln::msgs::{self, DecodeError, OnionMessageHandler, SocketAddress};
 use crate::sign::{EntropySource, NodeSigner, Recipient};
 use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
 use crate::util::test_utils;
-use super::{CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessageContents, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
+use super::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
+use super::offers::{OffersMessage, OffersMessageHandler};
+use super::packet::{OnionMessageContents, Packet};
 
 use bitcoin::network::constants::Network;
 use bitcoin::hashes::hex::FromHex;
@@ -571,8 +573,8 @@ fn spec_test_vector() {
        let sender_to_alice_packet_bytes_len = sender_to_alice_packet_bytes.len() as u64;
        let mut reader = io::Cursor::new(sender_to_alice_packet_bytes);
        let mut packet_reader = FixedLengthReader::new(&mut reader, sender_to_alice_packet_bytes_len);
-       let sender_to_alice_packet: super::Packet =
-               <super::Packet as LengthReadable>::read(&mut packet_reader).unwrap();
+       let sender_to_alice_packet: Packet =
+               <Packet as LengthReadable>::read(&mut packet_reader).unwrap();
        let secp_ctx = Secp256k1::new();
        let sender_to_alice_om = msgs::OnionMessage {
                blinding_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex("6363636363636363636363636363636363636363636363636363636363636363").unwrap()).unwrap()),
index a8ffcc02466f5afa918bf299e7a41572d24b72fc..5dc3bb422e67efffc880465ad5ab3769ce48fc7a 100644 (file)
@@ -7,8 +7,8 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
-//! more information.
+//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`] defined here, along
+//! with the various types, traits, and utilities that it uses.
 
 use bitcoin::hashes::{Hash, HashEngine};
 use bitcoin::hashes::hmac::{Hmac, HmacEngine};
@@ -26,7 +26,7 @@ use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress};
 use crate::ln::onion_utils;
 use crate::routing::gossip::{NetworkGraph, NodeId};
-pub use super::packet::OnionMessageContents;
+use super::packet::OnionMessageContents;
 use super::packet::ParsedOnionMessageContents;
 use super::offers::OffersMessageHandler;
 use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
@@ -74,7 +74,8 @@ pub(super) const MAX_TIMER_TICKS: usize = 2;
 /// # use lightning::blinded_path::BlindedPath;
 /// # use lightning::sign::{EntropySource, KeysManager};
 /// # use lightning::ln::peer_handler::IgnoringMessageHandler;
-/// # use lightning::onion_message::{OnionMessageContents, Destination, MessageRouter, OnionMessagePath, OnionMessenger};
+/// # use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath, OnionMessenger};
+/// # use lightning::onion_message::packet::OnionMessageContents;
 /// # use lightning::util::logger::{Logger, Record};
 /// # use lightning::util::ser::{Writeable, Writer};
 /// # use lightning::io;
index d106a542fd368156f0a4f7959c37919d8e1821ec..05a8b7d6fbd033922fb140ea97d326287aec8757 100644 (file)
 //!
 //! [offers]: <https://github.com/lightning/bolts/pull/798>
 //! [blinded paths]: crate::blinded_path::BlindedPath
+//! [`OnionMessenger`]: self::messenger::OnionMessenger
 
-mod messenger;
-mod offers;
-mod packet;
+pub mod messenger;
+pub mod offers;
+pub mod packet;
 #[cfg(test)]
 mod functional_tests;
-
-// Re-export structs so they can be imported with just the `onion_message::` module prefix.
-pub use self::messenger::{CustomOnionMessageHandler, DefaultMessageRouter, Destination, MessageRouter, OnionMessageContents, OnionMessagePath, OnionMessenger, PeeledOnion, PendingOnionMessage, SendError};
-pub use self::messenger::{create_onion_message, peel_onion_message};
-#[cfg(not(c_bindings))]
-pub use self::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-pub use self::offers::{OffersMessage, OffersMessageHandler};
-pub use self::packet::{Packet, ParsedOnionMessageContents};
-pub(crate) use self::packet::ControlTlvs;
-pub(crate) use self::messenger::new_pending_onion_message;
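
Editor's note: with `messenger`, `offers`, and `packet` now public modules and the flat re-exports removed, downstream imports move from the `onion_message::` prefix to module-qualified paths, as the hunks throughout this diff show. In short (paths as they appear in this diff):

    // Before: everything re-exported at the module root.
    // use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath, OnionMessageContents};

    // After: import each item from the submodule that owns it.
    use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
    use lightning::onion_message::offers::{OffersMessage, OffersMessageHandler};
    use lightning::onion_message::packet::OnionMessageContents;
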
index a65c0393a1e922042482cd3d16dca877cf110ea2..6672b5dfcdd0f3e33025c4bfe0654d6cfd2b0e56 100644 (file)
@@ -17,7 +17,7 @@ use crate::offers::invoice_error::InvoiceError;
 use crate::offers::invoice_request::InvoiceRequest;
 use crate::offers::invoice::Bolt12Invoice;
 use crate::offers::parse::Bolt12ParseError;
-use crate::onion_message::OnionMessageContents;
+use crate::onion_message::packet::OnionMessageContents;
 use crate::util::logger::Logger;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 #[cfg(not(c_bindings))]
@@ -39,7 +39,7 @@ pub trait OffersMessageHandler {
        ///
        /// The returned [`OffersMessage`], if any, is enqueued to be sent by [`OnionMessenger`].
        ///
-       /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+       /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
        fn handle_message(&self, message: OffersMessage) -> Option<OffersMessage>;
 
        /// Releases any [`OffersMessage`]s that need to be sent.
@@ -54,7 +54,7 @@ pub trait OffersMessageHandler {
        /// Typically, this is used for messages initiating a payment flow rather than in response to
        /// another message. The latter should use the return value of [`Self::handle_message`].
        #[cfg(c_bindings)]
-       fn release_pending_messages(&self) -> Vec<(OffersMessage, crate::onion_message::Destination, Option<crate::blinded_path::BlindedPath>)> { vec![] }
+       fn release_pending_messages(&self) -> Vec<(OffersMessage, crate::onion_message::messenger::Destination, Option<crate::blinded_path::BlindedPath>)> { vec![] }
 }
 
 /// Possible BOLT 12 Offers messages sent and received via an [`OnionMessage`].
index f79da5ee9ec2d5b4340837cd486b1f59678c74b7..277661862e3361d72a3b3b44975a928422a87db9 100644 (file)
@@ -20,7 +20,7 @@ use crate::ln::channelmanager::{ChannelDetails, PaymentId};
 use crate::ln::features::{BlindedHopFeatures, Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
 use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
-use crate::onion_message::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
+use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
 use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
 use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
 use crate::sign::EntropySource;
index a006d37e90e434f92905cd66a06138820ff731b1..1c45c4a4c5d3f3d36120c6ce5fffb515c5032cb3 100644 (file)
@@ -32,7 +32,7 @@ use crate::ln::msgs::LightningError;
 use crate::ln::script::ShutdownScript;
 use crate::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
 use crate::offers::invoice_request::UnsignedInvoiceRequest;
-use crate::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use crate::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
 use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
 use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
 use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};