Merge pull request #1764 from G8XSU/rgs-ignore-error
author Jeffrey Czyz <jkczyz@gmail.com>
Wed, 19 Oct 2022 13:50:09 +0000 (08:50 -0500)
committer GitHub <noreply@github.com>
Wed, 19 Oct 2022 13:50:09 +0000 (08:50 -0500)
Ignore Duplicate Gossip Error while updating networkGraph from RGS

33 files changed:
.github/workflows/build.yml
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/lib.rs
lightning-block-sync/src/poll.rs
lightning-block-sync/src/rest.rs
lightning-block-sync/src/rpc.rs
lightning-invoice/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/keysinterface.rs
lightning/src/chain/mod.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/msgs.rs
lightning/src/ln/script.rs
lightning/src/onion_message/messenger.rs
lightning/src/onion_message/packet.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/chacha20poly1305rfc.rs
lightning/src/util/config.rs
lightning/src/util/enforcing_trait_impls.rs
lightning/src/util/errors.rs
lightning/src/util/events.rs
lightning/src/util/scid_utils.rs
lightning/src/util/ser.rs
rustfmt.toml [new file with mode: 0644]

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 799d22a64667ed3fd8de55e4fe4a5097da7e2dba..171c57fcb1cf1c56f1b7a272e4a4a26a0513a673 100644 (file)
@@ -12,7 +12,7 @@ jobs:
                      beta,
                      # 1.41.1 is MSRV for Rust-Lightning, lightning-invoice, and lightning-persister
                      1.41.1,
-                     # 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, and coverage generation
+                     # 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, lightning-background-processor, and coverage generation
                      1.45.2,
                      # 1.47.0 will be the MSRV for no-std builds using hashbrown once core2 is updated
                      1.47.0]
@@ -20,34 +20,43 @@ jobs:
           - toolchain: stable
             build-net-tokio: true
             build-no-std: true
+            build-futures: true
           - toolchain: stable
             platform: macos-latest
             build-net-tokio: true
             build-no-std: true
+            build-futures: true
           - toolchain: beta
             platform: macos-latest
             build-net-tokio: true
             build-no-std: true
+            build-futures: true
           - toolchain: stable
             platform: windows-latest
             build-net-tokio: true
             build-no-std: true
+            build-futures: true
           - toolchain: beta
             platform: windows-latest
             build-net-tokio: true
             build-no-std: true
+            build-futures: true
           - toolchain: beta
             build-net-tokio: true
             build-no-std: true
+            build-futures: true
           - toolchain: 1.41.1
             build-no-std: false
             test-log-variants: true
+            build-futures: false
           - toolchain: 1.45.2
             build-net-old-tokio: true
             build-net-tokio: true
             build-no-std: false
+            build-futures: true
             coverage: true
           - toolchain: 1.47.0
+            build-futures: true
             build-no-std: true
     runs-on: ${{ matrix.platform }}
     steps:
@@ -109,7 +118,7 @@ jobs:
       - name: Test on Rust ${{ matrix.toolchain }} with net-tokio and full code-linking for coverage generation
         if: matrix.coverage
         run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
-      - name: Test on no-std bullds Rust ${{ matrix.toolchain }}
+      - name: Test no-std builds on Rust ${{ matrix.toolchain }}
         if: "matrix.build-no-std && !matrix.coverage"
         shell: bash # Default on Winblows is powershell
         run: |
@@ -140,15 +149,26 @@ jobs:
         run: |
           cd lightning
           RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no-std
-          cd ..
+      - name: Test futures builds on Rust ${{ matrix.toolchain }}
+        if: "matrix.build-futures && !matrix.coverage"
+        shell: bash # Default on Winblows is powershell
+        run: |
+          cd lightning-background-processor
+          cargo test --verbose --color always --no-default-features --features futures
+      - name: Test futures builds on Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
+        if: "matrix.build-futures && matrix.coverage"
+        shell: bash # Default on Winblows is powershell
+        run: |
+          cd lightning-background-processor
+          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features futures
       - name: Test on Rust ${{ matrix.toolchain }}
         if: "! matrix.build-net-tokio"
         run: |
-          cargo test --verbose --color always  -p lightning
-          cargo test --verbose --color always  -p lightning-invoice
-          cargo test --verbose --color always  -p lightning-rapid-gossip-sync
-          cargo build --verbose  --color always -p lightning-persister
-          cargo build --verbose  --color always -p lightning-background-processor
+          cargo test --verbose --color always -p lightning
+          cargo test --verbose --color always -p lightning-invoice
+          cargo test --verbose --color always -p lightning-rapid-gossip-sync
+          cargo test --verbose --color always -p lightning-persister
+          cargo test --verbose --color always -p lightning-background-processor
       - name: Test C Bindings Modifications on Rust ${{ matrix.toolchain }}
         if: "! matrix.build-net-tokio"
         run: |
@@ -299,6 +319,7 @@ jobs:
         run: |
           cargo check --release
           cargo check --no-default-features --features=no-std --release
+          cargo check --no-default-features --features=futures --release
           cargo doc --release
 
   fuzz:
diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml
index 2c4d3df081b977992b0c3da671d76d1481608ca7..6dd06b57f6c09fd582f76db2615f995e5e375626 100644 (file)
@@ -13,11 +13,14 @@ edition = "2018"
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
+[features]
+futures = [ "futures-util" ]
+
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.111", path = "../lightning", features = ["std"] }
 lightning-rapid-gossip-sync = { version = "0.0.111", path = "../lightning-rapid-gossip-sync" }
-futures = { version = "0.3", optional = true }
+futures-util = { version = "0.3", default-features = false, features = ["async-await-macro"], optional = true }
 
 [dev-dependencies]
 lightning = { version = "0.0.111", path = "../lightning", features = ["_test_utils"] }
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 938deee9bd7a0bc61f47387b18e77fa5a5b7c42b..27cd0663e3291f27e385787d99769553c012de80 100644 (file)
@@ -35,7 +35,7 @@ use std::time::{Duration, Instant};
 use std::ops::Deref;
 
 #[cfg(feature = "futures")]
-use futures::{select, future::FutureExt};
+use futures_util::{select_biased, future::FutureExt};
 
 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
@@ -378,6 +378,7 @@ pub async fn process_events_async<
        Descriptor: 'static + SocketDescriptor + Send + Sync,
        CMH: 'static + Deref + Send + Sync,
        RMH: 'static + Deref + Send + Sync,
+       OMH: 'static + Deref + Send + Sync,
        EH: 'static + EventHandler + Send,
        PS: 'static + Deref + Send,
        M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
@@ -385,7 +386,7 @@ pub async fn process_events_async<
        PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
        RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
        UMH: 'static + Deref + Send + Sync,
-       PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
+       PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH>> + Send + Sync,
        S: 'static + Deref<Target = SC> + Send + Sync,
        SC: WriteableScore<'a>,
        SleepFuture: core::future::Future<Output = bool>,
@@ -405,6 +406,7 @@ where
        L::Target: 'static + Logger,
        P::Target: 'static + Persist<Signer>,
        CMH::Target: 'static + ChannelMessageHandler,
+       OMH::Target: 'static + OnionMessageHandler,
        RMH::Target: 'static + RoutingMessageHandler,
        UMH::Target: 'static + CustomMessageHandler,
        PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
@@ -412,7 +414,7 @@ where
        let mut should_continue = true;
        define_run_body!(persister, event_handler, chain_monitor, channel_manager,
                gossip_sync, peer_manager, logger, scorer, should_continue, {
-                       select! {
+                       select_biased! {
                                _ = channel_manager.get_persistable_update_future().fuse() => true,
                                cont = sleeper(Duration::from_millis(100)).fuse() => {
                                        should_continue = cont;
@@ -607,7 +609,7 @@ mod tests {
 
        const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
 
-       #[derive(Clone, Eq, Hash, PartialEq)]
+       #[derive(Clone, Hash, PartialEq, Eq)]
        struct TestDescriptor{}
        impl SocketDescriptor for TestDescriptor {
                fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
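
Why the switch from `select!` to `select_biased!` matters here: a biased select polls its branches in declaration order, so a pending persistence notification is always serviced before the sleep timer instead of being picked at random. The following is a minimal, self-contained sketch of that pattern, not part of this patch; `persistable_update` and `sleeper` are stand-ins for `ChannelManager::get_persistable_update_future()` and the user-supplied sleep future, and the `futures` crate is assumed only to provide a `block_on` executor.

    use futures::executor::block_on;
    use futures_util::{select_biased, future::FutureExt, pin_mut};

    // Stand-in for ChannelManager::get_persistable_update_future().
    async fn persistable_update() {}
    // Stand-in for the user-supplied sleep future; returns whether to keep running.
    async fn sleeper() -> bool { true }

    fn main() {
        let should_continue = block_on(async {
            let update = persistable_update().fuse();
            let sleep = sleeper().fuse();
            pin_mut!(update, sleep);
            select_biased! {
                // Polled first: a ready persistence notification always wins a tie.
                _ = update => true,
                cont = sleep => cont,
            }
        });
        assert!(should_continue);
    }
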
diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml
index d213e36dd72e6dba2e16e2e8509d76cac12d7f3c..afd2a7c300ee6388fa9c6a2b7263e34aeebc0aa3 100644 (file)
@@ -20,7 +20,7 @@ rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.111", path = "../lightning" }
-futures = { version = "0.3" }
+futures-util = { version = "0.3" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
 serde = { version = "1.0", features = ["derive"], optional = true }
 serde_json = { version = "1.0", optional = true }
diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs
index 5d8bc27f8d141c2dcaaad9f1f53f97a25daa1cb0..189a68be0654dab1453ef459d3046d5d0001c17d 100644 (file)
@@ -98,7 +98,7 @@ pub struct BlockSourceError {
 }
 
 /// The kind of `BlockSourceError`, either persistent or transient.
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum BlockSourceErrorKind {
        /// Indicates an error that won't resolve when retrying a request (e.g., invalid data).
        Persistent,
@@ -139,7 +139,7 @@ impl BlockSourceError {
 
 /// A block header and some associated data. This information should be available from most block
 /// sources (and, notably, is available in Bitcoin Core's RPC and REST interfaces).
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct BlockHeaderData {
        /// The block header itself.
        pub header: BlockHeader,
diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs
index 2bb2f4a07df9e4a0aa79759249d795cc81cd833d..05ccd4504fde0d81a76646d91b44d9b2b1653711 100644 (file)
@@ -4,6 +4,7 @@ use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Blo
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
+use lightning::chain::BestBlock;
 
 use std::ops::Deref;
 
@@ -29,7 +30,7 @@ pub trait Poll {
 }
 
 /// A chain tip relative to another chain tip in terms of block hash and chainwork.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum ChainTip {
        /// A chain tip with the same hash as another chain's tip.
        Common,
@@ -102,7 +103,7 @@ impl Validate for BlockData {
 }
 
 /// A block header with validated proof of work and corresponding block hash.
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct ValidatedBlockHeader {
        pub(crate) block_hash: BlockHash,
        inner: BlockHeaderData,
@@ -146,6 +147,19 @@ impl ValidatedBlockHeader {
 
                Ok(())
        }
+
+    /// Returns the [`BestBlock`] corresponding to this validated block header, which can be passed
+    /// into [`ChannelManager::new`] as part of its [`ChainParameters`]. Useful for ensuring that
+    /// the [`SpvClient`] and [`ChannelManager`] are initialized to the same block during a fresh
+    /// start.
+    ///
+    /// [`SpvClient`]: crate::SpvClient
+    /// [`ChainParameters`]: lightning::ln::channelmanager::ChainParameters
+    /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+    /// [`ChannelManager::new`]: lightning::ln::channelmanager::ChannelManager::new
+    pub fn to_best_block(&self) -> BestBlock {
+        BestBlock::new(self.block_hash, self.inner.height)
+    }
 }
 
 /// A block with validated data against its transaction list and corresponding block hash.
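
A hedged sketch of the intended use of the new `to_best_block` helper: feeding one validated tip into both the `SpvClient` and a freshly created `ChannelManager`. Here `best_block` is assumed to come from `chain_tip.to_best_block()` on a `ValidatedBlockHeader`, and the `ChainParameters` field names are those of this LDK version; the function name is purely illustrative.

    use bitcoin::network::constants::Network;
    use lightning::chain::BestBlock;
    use lightning::ln::channelmanager::ChainParameters;

    // `best_block` would be obtained via `chain_tip.to_best_block()`.
    fn chain_params_for_fresh_start(best_block: BestBlock) -> ChainParameters {
        ChainParameters {
            network: Network::Bitcoin,
            // Both the SpvClient and the new ChannelManager now start from the same tip.
            best_block,
        }
    }
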
diff --git a/lightning-block-sync/src/rest.rs b/lightning-block-sync/src/rest.rs
index f46e5e02eb2c03b03e717ffb53c706f67e7bf8d8..c73b23b600c2925e3e0a085a99fc75560527f064 100644 (file)
@@ -7,7 +7,7 @@ use crate::http::{BinaryResponse, HttpEndpoint, HttpClient, JsonResponse};
 use bitcoin::hash_types::BlockHash;
 use bitcoin::hashes::hex::ToHex;
 
-use futures::lock::Mutex;
+use futures_util::lock::Mutex;
 
 use std::convert::TryFrom;
 use std::convert::TryInto;
diff --git a/lightning-block-sync/src/rpc.rs b/lightning-block-sync/src/rpc.rs
index 6e78654a9714a90b6438d6655c132db6fbbf45ee..f04769560246f8537e1e022efa22c8b7a815eab4 100644 (file)
@@ -7,7 +7,7 @@ use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse};
 use bitcoin::hash_types::BlockHash;
 use bitcoin::hashes::hex::ToHex;
 
-use futures::lock::Mutex;
+use futures_util::lock::Mutex;
 
 use serde_json;
 
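
The REST and RPC clients now take the async `Mutex` from `futures-util` directly; since the `futures` crate is only a facade over `futures-util`, the type and its non-blocking `lock().await` are unchanged. A small sketch of that behaviour, with `futures::executor::block_on` assumed purely as a stand-in executor:

    use futures::executor::block_on;
    use futures_util::lock::Mutex;

    fn main() {
        block_on(async {
            let requests_in_flight = Mutex::new(0u32);
            // `lock()` returns a future, so waiting on the mutex never blocks the executor thread.
            *requests_in_flight.lock().await += 1;
            assert_eq!(*requests_in_flight.lock().await, 1);
        });
    }
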
diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs
index 9457d8ae6cc1ccf206df675030c9fc5b57a31160..5aacf53966c59a075e602ad059237b24eac51753 100644 (file)
@@ -101,7 +101,7 @@ mod sync;
 /// Errors that indicate what is wrong with the invoice. They have some granularity for debug
 /// reasons, but should generally result in an "invalid BOLT11 invoice" message for the user.
 #[allow(missing_docs)]
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 pub enum ParseError {
        Bech32Error(bech32::Error),
        ParseAmountError(ParseIntError),
@@ -129,7 +129,7 @@ pub enum ParseError {
 /// Indicates that something went wrong while parsing or validating the invoice. Parsing errors
 /// should be mostly seen as opaque and are only there for debugging reasons. Semantic errors
 /// like wrong signatures, missing fields etc. could mean that someone tampered with the invoice.
-#[derive(PartialEq, Debug, Clone)]
+#[derive(PartialEq, Eq, Debug, Clone)]
 pub enum ParseOrSemanticError {
        /// The invoice couldn't be decoded
        ParseError(ParseError),
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 2479e8f7e7b5efd00582d2e9aaac7ecd9cfa9f31..3949810bf3ef33da01c3bbbd9b1cce0a1cfe65fc 100644 (file)
@@ -707,6 +707,7 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
              L::Target: Logger,
              P::Target: Persist<ChannelSigner>,
 {
+       #[cfg(not(anchors))]
        /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
        ///
        /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
@@ -722,6 +723,29 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
                        handler.handle_event(&event);
                }
        }
+       #[cfg(anchors)]
+       /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
+       ///
+       /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
+       /// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
+       /// within each channel. As the confirmation of a commitment transaction may be critical to the
+       /// safety of funds, this method must be invoked frequently, ideally once for every chain tip
+       /// update (block connected or disconnected).
+       ///
+       /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
+       /// order to handle these events.
+       ///
+       /// [`SpendableOutputs`]: events::Event::SpendableOutputs
+       /// [`BumpTransaction`]: events::Event::BumpTransaction
+       fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+               let mut pending_events = Vec::new();
+               for monitor_state in self.monitors.read().unwrap().values() {
+                       pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
+               }
+               for event in pending_events.drain(..) {
+                       handler.handle_event(&event);
+               }
+       }
 }
 
 #[cfg(test)]
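
Under `cfg(anchors)`, the events drained here include the new `BumpTransaction` variant, which the consumer must act on promptly so the commitment transaction confirms. A rough sketch of the consumer side, assuming the `ChannelClose` fields introduced later in this patch; it only compiles with the `anchors` cfg enabled, and the actual CPFP construction is left as a comment.

    use lightning::util::events::{BumpTransactionEvent, Event};

    fn handle_event(event: &Event) {
        match event {
            Event::BumpTransaction(BumpTransactionEvent::ChannelClose {
                package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_descriptor, ..
            }) => {
                // Build, sign, and broadcast a child transaction spending
                // `anchor_descriptor.outpoint` so that the commitment plus its child
                // meet `package_target_feerate_sat_per_1000_weight`.
                let _ = (package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_descriptor);
            },
            _ => {},
        }
    }
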
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 8f8cbdf448adb80d7fa50ee279c86862c1d0abd6..7c6b48d144b79b06c67105cc50d609f238b7581f 100644 (file)
@@ -21,8 +21,7 @@
 //! ChannelMonitors to get out of the HSM and onto monitoring devices.
 
 use bitcoin::blockdata::block::BlockHeader;
-use bitcoin::blockdata::transaction::{TxOut,Transaction};
-use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::transaction::{OutPoint as BitcoinOutPoint, TxOut, Transaction};
 use bitcoin::blockdata::script::{Script, Builder};
 use bitcoin::blockdata::opcodes;
 
@@ -44,6 +43,8 @@ use chain::{BestBlock, WatchedOutput};
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::transaction::{OutPoint, TransactionData};
 use chain::keysinterface::{SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, Sign, KeysInterface};
+#[cfg(anchors)]
+use chain::onchaintx::ClaimEvent;
 use chain::onchaintx::OnchainTxHandler;
 use chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
 use chain::Filter;
@@ -51,6 +52,8 @@ use util::logger::Logger;
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, U48, OptionDeserWrapper};
 use util::byte_utils;
 use util::events::Event;
+#[cfg(anchors)]
+use util::events::{AnchorDescriptor, BumpTransactionEvent};
 
 use prelude::*;
 use core::{cmp, mem};
@@ -66,7 +69,7 @@ use sync::Mutex;
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq))]
+#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
 #[derive(Clone)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
@@ -125,7 +128,7 @@ impl Readable for ChannelMonitorUpdate {
 }
 
 /// An event to be processed by the ChannelManager.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub enum MonitorEvent {
        /// A monitor event containing an HTLCUpdate.
        HTLCEvent(HTLCUpdate),
@@ -170,7 +173,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 /// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
 /// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
 /// preimage claim backward will lead to loss of funds.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub struct HTLCUpdate {
        pub(crate) payment_hash: PaymentHash,
        pub(crate) payment_preimage: Option<PaymentPreimage>,
@@ -236,7 +239,7 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
 
 // TODO(devrandom) replace this with HolderCommitmentTransaction
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 struct HolderSignedTx {
        /// txid of the transaction in tx, just used to make comparison faster
        txid: Txid,
@@ -263,9 +266,23 @@ impl_writeable_tlv_based!(HolderSignedTx, {
        (14, htlc_outputs, vec_type)
 });
 
+#[cfg(anchors)]
+impl HolderSignedTx {
+       fn non_dust_htlcs(&self) -> Vec<HTLCOutputInCommitment> {
+               self.htlc_outputs.iter().filter_map(|(htlc, _, _)| {
+                       if let Some(_) = htlc.transaction_output_index {
+                               Some(htlc.clone())
+                       } else {
+                               None
+                       }
+               })
+               .collect()
+       }
+}
+
 /// We use this to track static counterparty commitment transaction data and to generate any
 /// justice or 2nd-stage preimage/timeout transactions.
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 struct CounterpartyCommitmentParameters {
        counterparty_delayed_payment_base_key: PublicKey,
        counterparty_htlc_base_key: PublicKey,
@@ -319,7 +336,7 @@ impl Readable for CounterpartyCommitmentParameters {
 /// transaction causing it.
 ///
 /// Used to determine when the on-chain event can be considered safe from a chain reorganization.
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 struct OnchainEventEntry {
        txid: Txid,
        height: u32,
@@ -361,7 +378,7 @@ type CommitmentTxCounterpartyOutputInfo = Option<(u32, u64)>;
 
 /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
 /// once they mature to enough confirmations (ANTI_REORG_DELAY)
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 enum OnchainEvent {
        /// An outbound HTLC failing after a transaction is confirmed. Used
        ///  * when an outbound HTLC output is spent by us after the HTLC timed out
@@ -471,7 +488,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
 
 );
 
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq))]
+#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
 #[derive(Clone)]
 pub(crate) enum ChannelMonitorUpdateStep {
        LatestHolderCommitmentTXInfo {
@@ -619,7 +636,7 @@ pub enum Balance {
 }
 
 /// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 struct IrrevocablyResolvedHTLC {
        commitment_tx_output_idx: Option<u32>,
        /// The txid of the transaction which resolved the HTLC, this may be a commitment (if the HTLC
@@ -1221,7 +1238,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                B::Target: BroadcasterInterface,
                L::Target: Logger,
        {
-               self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger)
+               self.inner.lock().unwrap().broadcast_latest_holder_commitment_txn(broadcaster, logger);
        }
 
        /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
@@ -2222,6 +2239,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
                }
                let mut ret = Ok(());
+               let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
                for update in updates.updates.iter() {
                        match update {
                                ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
@@ -2239,7 +2257,6 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                },
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
                                        log_trace!(logger, "Updating ChannelMonitor with payment preimage");
-                                       let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
                                        self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger)
                                },
                                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
@@ -2255,6 +2272,25 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        self.lockdown_from_offchain = true;
                                        if *should_broadcast {
                                                self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
+                                               // If the channel supports anchor outputs, we'll need to emit an external
+                                               // event to be consumed such that a child transaction is broadcast with a
+                                               // high enough feerate for the parent commitment transaction to confirm.
+                                               if self.onchain_tx_handler.opt_anchors() {
+                                                       let funding_output = HolderFundingOutput::build(
+                                                               self.funding_redeemscript.clone(), self.channel_value_satoshis,
+                                                               self.onchain_tx_handler.opt_anchors(),
+                                                       );
+                                                       let best_block_height = self.best_block.height();
+                                                       let commitment_package = PackageTemplate::build_package(
+                                                               self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
+                                                               PackageSolvingData::HolderFundingOutput(funding_output),
+                                                               best_block_height, false, best_block_height,
+                                                       );
+                                                       self.onchain_tx_handler.update_claims_view(
+                                                               &[], vec![commitment_package], best_block_height, best_block_height,
+                                                               broadcaster, &bounded_fee_estimator, logger,
+                                                       );
+                                               }
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
                                                log_error!(logger, "    in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id()));
@@ -2309,6 +2345,34 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
        pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
                let mut ret = Vec::new();
                mem::swap(&mut ret, &mut self.pending_events);
+               #[cfg(anchors)]
+               for claim_event in self.onchain_tx_handler.get_and_clear_pending_claim_events().drain(..) {
+                       match claim_event {
+                               ClaimEvent::BumpCommitment {
+                                       package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_output_idx,
+                               } => {
+                                       let commitment_txid = commitment_tx.txid();
+                                       debug_assert_eq!(self.current_holder_commitment_tx.txid, commitment_txid);
+                                       let pending_htlcs = self.current_holder_commitment_tx.non_dust_htlcs();
+                                       let commitment_tx_fee_satoshis = self.channel_value_satoshis -
+                                               commitment_tx.output.iter().fold(0u64, |sum, output| sum + output.value);
+                                       ret.push(Event::BumpTransaction(BumpTransactionEvent::ChannelClose {
+                                               package_target_feerate_sat_per_1000_weight,
+                                               commitment_tx,
+                                               commitment_tx_fee_satoshis,
+                                               anchor_descriptor: AnchorDescriptor {
+                                                       channel_keys_id: self.channel_keys_id,
+                                                       channel_value_satoshis: self.channel_value_satoshis,
+                                                       outpoint: BitcoinOutPoint {
+                                                               txid: commitment_txid,
+                                                               vout: anchor_output_idx,
+                                                       },
+                                               },
+                                               pending_htlcs,
+                                       }));
+                               },
+                       }
+               }
                ret
        }
 
@@ -2521,13 +2585,13 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
                                                                self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
                                                                self.counterparty_commitment_params.counterparty_htlc_base_key,
-                                                               preimage.unwrap(), htlc.clone()))
+                                                               preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.opt_anchors()))
                                        } else {
                                                PackageSolvingData::CounterpartyReceivedHTLCOutput(
                                                        CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
                                                                self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
                                                                self.counterparty_commitment_params.counterparty_htlc_base_key,
-                                                               htlc.clone()))
+                                                               htlc.clone(), self.onchain_tx_handler.opt_anchors()))
                                        };
                                        let aggregation = if !htlc.offered { false } else { true };
                                        let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
@@ -2826,7 +2890,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                txid,
                                                transaction: Some((*tx).clone()),
-                                               height: height,
+                                               height,
                                                event: OnchainEvent::FundingSpendConfirmation {
                                                        on_local_output_csv: balance_spendable_csv,
                                                        commitment_tx_to_counterparty_output,
@@ -2884,21 +2948,26 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
                let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
                if should_broadcast {
-                       let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone());
+                       let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.opt_anchors());
                        let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
                        claimable_outpoints.push(commitment_package);
                        self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
                        let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
                        self.holder_tx_signed = true;
-                       // Because we're broadcasting a commitment transaction, we should construct the package
-                       // assuming it gets confirmed in the next block. Sadly, we have code which considers
-                       // "not yet confirmed" things as discardable, so we cannot do that here.
-                       let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
-                       let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
-                       if !new_outputs.is_empty() {
-                               watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+                       // We can't broadcast our HTLC transactions while the commitment transaction is
+                       // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
+                       // `transactions_confirmed`.
+                       if !self.onchain_tx_handler.opt_anchors() {
+                               // Because we're broadcasting a commitment transaction, we should construct the package
+                               // assuming it gets confirmed in the next block. Sadly, we have code which considers
+                               // "not yet confirmed" things as discardable, so we cannot do that here.
+                               let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
+                               let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
+                               if !new_outputs.is_empty() {
+                                       watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+                               }
+                               claimable_outpoints.append(&mut new_outpoints);
                        }
-                       claimable_outpoints.append(&mut new_outpoints);
                }
 
                // Find which on-chain events have reached their confirmation threshold.
@@ -3418,7 +3487,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                        let entry = OnchainEventEntry {
                                txid: tx.txid(),
                                transaction: Some(tx.clone()),
-                               height: height,
+                               height,
                                event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
                        };
                        log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold());
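
One small piece of arithmetic in the new `BumpTransaction` path is worth spelling out: because the commitment transaction's only input is the funding output, its absolute fee is simply the channel value minus the sum of its outputs. A stand-alone restatement of that calculation (the function name and parameters are illustrative only):

    use bitcoin::Transaction;

    fn commitment_tx_fee_satoshis(channel_value_satoshis: u64, commitment_tx: &Transaction) -> u64 {
        // Input value (the funding output) minus total output value.
        channel_value_satoshis
            - commitment_tx.output.iter().fold(0u64, |sum, output| sum + output.value)
    }
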
diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs
index 73b8a1b98224ace7aef2f6db05a82ca2a020e476..1c64a8bccdfbe4953b6944cb9b9bbc4115b18f4e 100644 (file)
@@ -36,6 +36,7 @@ use util::crypto::{hkdf_extract_expand_twice, sign};
 use util::ser::{Writeable, Writer, Readable, ReadableArgs};
 
 use chain::transaction::OutPoint;
+use ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI;
 use ln::{chan_utils, PaymentPreimage};
 use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, HolderCommitmentTransaction, ChannelTransactionParameters, CommitmentTransaction, ClosingTransaction};
 use ln::msgs::UnsignedChannelAnnouncement;
@@ -55,7 +56,7 @@ pub struct KeyMaterial(pub [u8; 32]);
 
 /// Information about a spendable output to a P2WSH script. See
 /// SpendableOutputDescriptor::DelayedPaymentOutput for more details on how to spend this.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct DelayedPaymentOutputDescriptor {
        /// The outpoint which is spendable
        pub outpoint: OutPoint,
@@ -95,7 +96,7 @@ impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, {
 
 /// Information about a spendable output to our "payment key". See
 /// SpendableOutputDescriptor::StaticPaymentOutput for more details on how to spend this.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct StaticPaymentOutputDescriptor {
        /// The outpoint which is spendable
        pub outpoint: OutPoint,
@@ -126,7 +127,7 @@ impl_writeable_tlv_based!(StaticPaymentOutputDescriptor, {
 /// spend on-chain. The information needed to do this is provided in this enum, including the
 /// outpoint describing which txid and output index is available, the full output which exists at
 /// that txid/index, and any keys or other information required to sign.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum SpendableOutputDescriptor {
        /// An output to a script which was provided via KeysInterface directly, either from
        /// `get_destination_script()` or `get_shutdown_scriptpubkey()`, thus you should already know
@@ -348,6 +349,12 @@ pub trait BaseSign {
        /// chosen to forgo their output as dust.
        fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
 
+       /// Computes the signature for a commitment transaction's anchor output used as an
+       /// input within `anchor_tx`, which spends the commitment transaction, at index `input`.
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &mut Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()>;
+
        /// Signs a channel announcement message with our funding key and our node secret key (aka
        /// node_id or network_key), proving it comes from one of the channel participants.
        ///
@@ -645,6 +652,7 @@ impl InMemorySigner {
                witness.push(witness_script.clone().into_bytes());
                Ok(witness)
        }
+
 }
 
 impl BaseSign for InMemorySigner {
@@ -762,6 +770,16 @@ impl BaseSign for InMemorySigner {
                Ok(closing_tx.trust().sign(&self.funding_key, &channel_funding_redeemscript, self.channel_value_satoshis, secp_ctx))
        }
 
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &mut Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()> {
+               let witness_script = chan_utils::get_anchor_redeemscript(&self.holder_channel_pubkeys.funding_pubkey);
+               let sighash = sighash::SighashCache::new(&*anchor_tx).segwit_signature_hash(
+                       input, &witness_script, ANCHOR_OUTPUT_VALUE_SATOSHI, EcdsaSighashType::All,
+               ).unwrap();
+               Ok(sign(secp_ctx, &hash_to_message!(&sighash[..]), &self.funding_key))
+       }
+
        fn sign_channel_announcement(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>)
        -> Result<(Signature, Signature), ()> {
                let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
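
For context on the new `sign_holder_anchor_input` method: the signature it returns is over a plain BIP143 (segwit v0) sighash committing to the anchor redeemscript and the fixed anchor amount (330 sats, the `ANCHOR_OUTPUT_VALUE_SATOSHI` constant this patch imports from `ln::channel`). Below is a hedged restatement using only rust-bitcoin APIs; `witness_script` is assumed to be the anchor redeemscript and the helper name is illustrative.

    use bitcoin::util::sighash::SighashCache;
    use bitcoin::{EcdsaSighashType, Script, Transaction};
    use bitcoin::secp256k1::Message;

    // Anchor outputs carry a fixed 330-sat value.
    const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

    fn anchor_input_sighash(anchor_tx: &Transaction, input: usize, witness_script: &Script) -> Message {
        let sighash = SighashCache::new(anchor_tx)
            .segwit_signature_hash(input, witness_script, ANCHOR_OUTPUT_VALUE_SATOSHI, EcdsaSighashType::All)
            .expect("input index is in range");
        Message::from_slice(&sighash[..]).expect("sighashes are 32 bytes")
    }
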
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index c54f9b1d7eba6814cb831bfd3c91c112fcc096cf..bb80440d116a5a11286c41745d9b55e2c07137a5 100644 (file)
@@ -32,7 +32,7 @@ pub(crate) mod onchaintx;
 pub(crate) mod package;
 
 /// The best known block as identified by its hash and height.
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, PartialEq, Eq)]
 pub struct BestBlock {
        block_hash: BlockHash,
        height: u32,
@@ -188,7 +188,7 @@ pub trait Confirm {
 }
 
 /// An enum representing the status of a channel monitor update persistence.
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum ChannelMonitorUpdateStatus {
        /// The update has been durably persisted and all copies of the relevant [`ChannelMonitor`]
        /// have been updated.
@@ -364,7 +364,7 @@ pub trait Filter {
 ///
 /// [`ChannelMonitor`]: channelmonitor::ChannelMonitor
 /// [`ChannelMonitor::block_connected`]: channelmonitor::ChannelMonitor::block_connected
-#[derive(Clone, PartialEq, Hash)]
+#[derive(Clone, PartialEq, Eq, Hash)]
 pub struct WatchedOutput {
        /// First block where the transaction output may have been spent.
        pub block_hash: Option<BlockHash>,
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index 0f2edff5ed78bd3472e2f7c93ad8b2e845c62ba1..875f4d896d5d114c97fa4c0777e3b761d03e2b27 100644 (file)
@@ -23,10 +23,16 @@ use bitcoin::secp256k1;
 
 use ln::msgs::DecodeError;
 use ln::PaymentPreimage;
+#[cfg(anchors)]
+use ln::chan_utils;
 use ln::chan_utils::{ChannelTransactionParameters, HolderCommitmentTransaction};
+#[cfg(anchors)]
+use chain::chaininterface::ConfirmationTarget;
 use chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
 use chain::keysinterface::{Sign, KeysInterface};
+#[cfg(anchors)]
+use chain::package::PackageSolvingData;
 use chain::package::PackageTemplate;
 use util::logger::Logger;
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writer, Writeable, VecWriter};
@@ -38,6 +44,8 @@ use alloc::collections::BTreeMap;
 use core::cmp;
 use core::ops::Deref;
 use core::mem::replace;
+#[cfg(anchors)]
+use core::mem::swap;
 use bitcoin::hashes::Hash;
 
 const MAX_ALLOC_SIZE: usize = 64*1024;
@@ -46,7 +54,7 @@ const MAX_ALLOC_SIZE: usize = 64*1024;
 /// transaction causing it.
 ///
 /// Used to determine when the on-chain event can be considered safe from a chain reorganization.
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 struct OnchainEventEntry {
        txid: Txid,
        height: u32,
@@ -65,7 +73,7 @@ impl OnchainEventEntry {
 
 /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
 /// once they mature to enough confirmations (ANTI_REORG_DELAY)
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 enum OnchainEvent {
        /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
        /// bump-txn candidate buffer.
@@ -162,6 +170,29 @@ impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
        }
 }
 
+// Represents the different types of claims for which events are yielded externally to satisfy said
+// claims.
+#[cfg(anchors)]
+pub(crate) enum ClaimEvent {
+       /// Event yielded to signal that the commitment transaction fee must be bumped to claim any
+       /// encumbered funds and proceed to HTLC resolution, if any HTLCs exist.
+       BumpCommitment {
+               package_target_feerate_sat_per_1000_weight: u32,
+               commitment_tx: Transaction,
+               anchor_output_idx: u32,
+       },
+}
+
+/// Represents the different ways an output can be claimed (i.e., spent to an address under our
+/// control) onchain.
+pub(crate) enum OnchainClaim {
+       /// A finalized transaction pending confirmation spending the output to claim.
+       Tx(Transaction),
+       #[cfg(anchors)]
+       /// An event yielded externally to signal additional inputs must be added to a transaction
+       /// pending confirmation spending the output to claim.
+       Event(ClaimEvent),
+}
 
 /// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
 /// do RBF bumping if possible.
@@ -193,6 +224,8 @@ pub struct OnchainTxHandler<ChannelSigner: Sign> {
        pub(crate) pending_claim_requests: HashMap<Txid, PackageTemplate>,
        #[cfg(not(test))]
        pending_claim_requests: HashMap<Txid, PackageTemplate>,
+       #[cfg(anchors)]
+       pending_claim_events: HashMap<Txid, ClaimEvent>,
 
        // Used to link outpoints claimed in a connected block to a pending claim request.
        // Key is outpoint than monitor parsing has detected we have keys/scripts to claim
@@ -342,6 +375,8 @@ impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
                        locktimed_packages,
                        pending_claim_requests,
                        onchain_events_awaiting_threshold_conf,
+                       #[cfg(anchors)]
+                       pending_claim_events: HashMap::new(),
                        secp_ctx,
                })
        }
@@ -361,6 +396,8 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                        claimable_outpoints: HashMap::new(),
                        locktimed_packages: BTreeMap::new(),
                        onchain_events_awaiting_threshold_conf: Vec::new(),
+                       #[cfg(anchors)]
+                       pending_claim_events: HashMap::new(),
 
                        secp_ctx,
                }
@@ -374,11 +411,22 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                self.holder_commitment.to_broadcaster_value_sat()
        }
 
-       /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize coutnerparty onchain) lays on the assumption of claim transactions getting confirmed before timelock expiration
-       /// (CSV or CLTV following cases). In case of high-fee spikes, claim tx may stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pay-For-Parent.
-       /// Panics if there are signing errors, because signing operations in reaction to on-chain events
-       /// are not expected to fail, and if they do, we may lose funds.
-       fn generate_claim_tx<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
+       #[cfg(anchors)]
+       pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<ClaimEvent> {
+               let mut ret = HashMap::new();
+               swap(&mut ret, &mut self.pending_claim_events);
+               ret.into_iter().map(|(_, event)| event).collect::<Vec<_>>()
+       }
+
+       /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
+       /// onchain) lays on the assumption of claim transactions getting confirmed before timelock
+       /// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck
+       /// in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or
+       /// Child-Pay-For-Parent.
+       ///
+       /// Panics if there are signing errors, because signing operations in reaction to on-chain
+       /// events are not expected to fail, and if they do, we may lose funds.
+       fn generate_claim<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(Option<u32>, u64, OnchainClaim)>
                where F::Target: FeeEstimator,
                                        L::Target: Logger,
        {
@@ -388,23 +436,71 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
                let new_timer = Some(cached_request.get_height_timer(cur_height));
                if cached_request.is_malleable() {
-                       let predicted_weight = cached_request.package_weight(&self.destination_script, self.channel_transaction_parameters.opt_anchors.is_some());
+                       let predicted_weight = cached_request.package_weight(&self.destination_script);
                        if let Some((output_value, new_feerate)) =
                                        cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().to_sat(), fee_estimator, logger) {
                                assert!(new_feerate != 0);
 
-                               let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
+                               let transaction = cached_request.finalize_malleable_package(self, output_value, self.destination_script.clone(), logger).unwrap();
                                log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
                                assert!(predicted_weight >= transaction.weight());
-                               return Some((new_timer, new_feerate, transaction))
+                               return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)))
                        }
                } else {
-                       // Note: Currently, amounts of holder outputs spending witnesses aren't used
-                       // as we can't malleate spending package to increase their feerate. This
-                       // should change with the remaining anchor output patchset.
-                       if let Some(transaction) = cached_request.finalize_package(self, 0, self.destination_script.clone(), logger) {
-                               return Some((None, 0, transaction));
+                       // Untractable packages cannot have their fees bumped through Replace-By-Fee. Some
+                       // packages may support fee bumping through Child-Pays-For-Parent, indicated by those
+                       // which require external funding.
+                       #[cfg(not(anchors))]
+                       let inputs = cached_request.inputs();
+                       #[cfg(anchors)]
+                       let mut inputs = cached_request.inputs();
+                       debug_assert_eq!(inputs.len(), 1);
+                       let tx = match cached_request.finalize_untractable_package(self, logger) {
+                               Some(tx) => tx,
+                               None => return None,
+                       };
+                       if !cached_request.requires_external_funding() {
+                               return Some((None, 0, OnchainClaim::Tx(tx)));
                        }
+                       #[cfg(anchors)]
+                       return inputs.find_map(|input| match input {
+                               // Commitment inputs with anchors support are the only untractable inputs supported
+                               // thus far that require external funding.
+                               PackageSolvingData::HolderFundingOutput(..) => {
+                                       debug_assert_eq!(tx.txid(), self.holder_commitment.trust().txid(),
+                                               "Holder commitment transaction mismatch");
+                                       // We'll locate an anchor output we can spend within the commitment transaction.
+                                       let funding_pubkey = &self.channel_transaction_parameters.holder_pubkeys.funding_pubkey;
+                                       match chan_utils::get_anchor_output(&tx, funding_pubkey) {
+                                               // An anchor output was found, so we should yield a funding event externally.
+                                               Some((idx, _)) => {
+                                                       // TODO: Use a lower confirmation target when both our and the
+                                                       // counterparty's latest commitment don't have any HTLCs present.
+                                                       let conf_target = ConfirmationTarget::HighPriority;
+                                                       let package_target_feerate_sat_per_1000_weight = cached_request
+                                                               .compute_package_feerate(fee_estimator, conf_target);
+                                                       Some((
+                                                               new_timer,
+                                                               package_target_feerate_sat_per_1000_weight as u64,
+                                                               OnchainClaim::Event(ClaimEvent::BumpCommitment {
+                                                                       package_target_feerate_sat_per_1000_weight,
+                                                                       commitment_tx: tx.clone(),
+                                                                       anchor_output_idx: idx,
+                                                               }),
+                                                       ))
+                                               },
+                                               // An anchor output was not found. There's nothing we can do other than
+                                               // attempt to broadcast the transaction with its current fee rate and hope
+                                               // it confirms. This is essentially the same behavior as a commitment
+                                               // transaction without anchor outputs.
+                                               None => Some((None, 0, OnchainClaim::Tx(tx.clone()))),
+                                       }
+                               },
+                               _ => {
+                                       debug_assert!(false, "Only HolderFundingOutput inputs should be untractable and require external funding");
+                                       None
+                               },
+                       });
                }
                None
        }
@@ -475,17 +571,30 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                // Generate claim transactions and track them to bump if necessary at
                // height timer expiration (i.e in how many blocks we're going to take action).
                for mut req in preprocessed_requests {
-                       if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(cur_height, &req, &*fee_estimator, &*logger) {
+                       if let Some((new_timer, new_feerate, claim)) = self.generate_claim(cur_height, &req, &*fee_estimator, &*logger) {
                                req.set_timer(new_timer);
                                req.set_feerate(new_feerate);
-                               let txid = tx.txid();
+                               let txid = match claim {
+                                       OnchainClaim::Tx(tx) => {
+                                               log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
+                                               broadcaster.broadcast_transaction(&tx);
+                                               tx.txid()
+                                       },
+                                       #[cfg(anchors)]
+                                       OnchainClaim::Event(claim_event) => {
+                                               log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints());
+                                               let txid = match claim_event {
+                                                       ClaimEvent::BumpCommitment { ref commitment_tx, .. } => commitment_tx.txid(),
+                                               };
+                                               self.pending_claim_events.insert(txid, claim_event);
+                                               txid
+                                       },
+                               };
                                for k in req.outpoints() {
                                        log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
                                        self.claimable_outpoints.insert(k.clone(), (txid, conf_height));
                                }
                                self.pending_claim_requests.insert(txid, req);
-                               log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
-                               broadcaster.broadcast_transaction(&tx);
                        }
                }
 
@@ -577,6 +686,8 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                                                        for outpoint in request.outpoints() {
                                                                log_debug!(logger, "Removing claim tracking for {} due to maturation of claim tx {}.", outpoint, claim_request);
                                                                self.claimable_outpoints.remove(&outpoint);
+                                                               #[cfg(anchors)]
+                                                               self.pending_claim_events.remove(&claim_request);
                                                        }
                                                }
                                        },
@@ -603,9 +714,18 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                // Build, bump and rebroadcast tx accordingly
                log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
                for (first_claim_txid, request) in bump_candidates.iter() {
-                       if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(cur_height, &request, &*fee_estimator, &*logger) {
-                               log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
-                               broadcaster.broadcast_transaction(&bump_tx);
+                       if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
+                               match bump_claim {
+                                       OnchainClaim::Tx(bump_tx) => {
+                                               log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
+                                               broadcaster.broadcast_transaction(&bump_tx);
+                                       },
+                                       #[cfg(anchors)]
+                                       OnchainClaim::Event(claim_event) => {
+                                               log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
+                                               self.pending_claim_events.insert(*first_claim_txid, claim_event);
+                                       },
+                               }
                                if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
                                        request.set_timer(new_timer);
                                        request.set_feerate(new_feerate);
@@ -667,12 +787,21 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
                                self.onchain_events_awaiting_threshold_conf.push(entry);
                        }
                }
-               for (_, request) in bump_candidates.iter_mut() {
-                       if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, fee_estimator, &&*logger) {
+               for (_first_claim_txid_height, request) in bump_candidates.iter_mut() {
+                       if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
                                request.set_timer(new_timer);
                                request.set_feerate(new_feerate);
-                               log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
-                               broadcaster.broadcast_transaction(&bump_tx);
+                               match bump_claim {
+                                       OnchainClaim::Tx(bump_tx) => {
+                                               log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+                                               broadcaster.broadcast_transaction(&bump_tx);
+                                       },
+                                       #[cfg(anchors)]
+                                       OnchainClaim::Event(claim_event) => {
+                                               log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
+                                               self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event);
+                                       },
+                               }
                        }
                }
                for (ancestor_claim_txid, request) in bump_candidates.drain() {
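
The hunks above all follow the same shape: `generate_claim` now returns an `OnchainClaim` instead of a bare transaction, and every call site either broadcasts the fully-signed transaction as before or, on the anchors path, queues a claim event for the consumer to fund. A minimal sketch of that dispatch, with placeholder generics standing in for the real `Transaction` and `ClaimEvent` types:

    // Sketch only: the real enum lives in this file and wraps bitcoin::Transaction
    // and the anchors-only ClaimEvent.
    enum OnchainClaim<Tx, Ev> {
        // A claim the handler can sign and broadcast on its own.
        Tx(Tx),
        // A claim that needs external funds attached (e.g. an anchor CPFP), so it
        // is surfaced to the consumer instead of being broadcast here.
        Event(Ev),
    }

    fn handle_claim<Tx, Ev>(claim: OnchainClaim<Tx, Ev>, broadcast: impl Fn(&Tx), pending: &mut Vec<Ev>) {
        match claim {
            OnchainClaim::Tx(tx) => broadcast(&tx),      // unchanged behaviour
            OnchainClaim::Event(ev) => pending.push(ev), // new: hand off to the user
        }
    }
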
index c945d8909da4a61cc656a634a75f0b130eb342d2..5aa55fb197583be8e97f4ecc8c05ee806df2433c 100644 (file)
@@ -34,6 +34,8 @@ use util::ser::{Readable, Writer, Writeable};
 use io;
 use prelude::*;
 use core::cmp;
+#[cfg(anchors)]
+use core::convert::TryInto;
 use core::mem;
 use core::ops::Deref;
 use bitcoin::{PackedLockTime, Sequence, Witness};
@@ -86,7 +88,7 @@ const HIGH_FREQUENCY_BUMP_INTERVAL: u32 = 1;
 ///
 /// CSV and pubkeys are used as part of a witnessScript redeeming a balance output; the amount is used
 /// as part of the signature hash and revocation secret to generate a satisfying witness.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) struct RevokedOutput {
        per_commitment_point: PublicKey,
        counterparty_delayed_payment_base_key: PublicKey,
@@ -129,7 +131,7 @@ impl_writeable_tlv_based!(RevokedOutput, {
 ///
 /// CSV is used as part of a witnessScript redeeming a balance output; the amount is used as part
 /// of the signature hash and revocation secret to generate a satisfying witness.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) struct RevokedHTLCOutput {
        per_commitment_point: PublicKey,
        counterparty_delayed_payment_base_key: PublicKey,
@@ -171,29 +173,36 @@ impl_writeable_tlv_based!(RevokedHTLCOutput, {
 /// witnessScript.
 ///
 /// The preimage is used as part of the witness.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) struct CounterpartyOfferedHTLCOutput {
        per_commitment_point: PublicKey,
        counterparty_delayed_payment_base_key: PublicKey,
        counterparty_htlc_base_key: PublicKey,
        preimage: PaymentPreimage,
-       htlc: HTLCOutputInCommitment
+       htlc: HTLCOutputInCommitment,
+       opt_anchors: Option<()>,
 }
 
 impl CounterpartyOfferedHTLCOutput {
-       pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment) -> Self {
+       pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, opt_anchors: bool) -> Self {
                CounterpartyOfferedHTLCOutput {
                        per_commitment_point,
                        counterparty_delayed_payment_base_key,
                        counterparty_htlc_base_key,
                        preimage,
-                       htlc
+                       htlc,
+                       opt_anchors: if opt_anchors { Some(()) } else { None },
                }
        }
+
+       fn opt_anchors(&self) -> bool {
+               self.opt_anchors.is_some()
+       }
 }
 
 impl_writeable_tlv_based!(CounterpartyOfferedHTLCOutput, {
        (0, per_commitment_point, required),
+       (1, opt_anchors, option),
        (2, counterparty_delayed_payment_base_key, required),
        (4, counterparty_htlc_base_key, required),
        (6, preimage, required),
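
The `opt_anchors: Option<()>` field added above (and to the other package types below) is this codebase's usual backwards-compatible way to attach a boolean to a TLV-serialized struct: the flag is written under a new odd TLV type (1 here), so older readers skip it and data serialized before this change reads back with the flag unset. Stripped of the serialization macro, the pattern looks roughly like this (hypothetical trimmed-down struct):

    struct HtlcOutput {
        // `Some(())` means the channel uses anchor outputs; `None` (the default when
        // the field is absent in previously serialized data) means it does not.
        opt_anchors: Option<()>,
    }

    impl HtlcOutput {
        fn new(opt_anchors: bool) -> Self {
            Self { opt_anchors: if opt_anchors { Some(()) } else { None } }
        }
        fn opt_anchors(&self) -> bool {
            self.opt_anchors.is_some()
        }
    }
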
@@ -204,27 +213,34 @@ impl_writeable_tlv_based!(CounterpartyOfferedHTLCOutput, {
 ///
 /// HTLCOutputInCommitment (hash, timelock, direction) and pubkeys are used to generate a suitable
 /// witnessScript.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) struct CounterpartyReceivedHTLCOutput {
        per_commitment_point: PublicKey,
        counterparty_delayed_payment_base_key: PublicKey,
        counterparty_htlc_base_key: PublicKey,
-       htlc: HTLCOutputInCommitment
+       htlc: HTLCOutputInCommitment,
+       opt_anchors: Option<()>,
 }
 
 impl CounterpartyReceivedHTLCOutput {
-       pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, htlc: HTLCOutputInCommitment) -> Self {
+       pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, htlc: HTLCOutputInCommitment, opt_anchors: bool) -> Self {
                CounterpartyReceivedHTLCOutput {
                        per_commitment_point,
                        counterparty_delayed_payment_base_key,
                        counterparty_htlc_base_key,
-                       htlc
+                       htlc,
+                       opt_anchors: if opt_anchors { Some(()) } else { None },
                }
        }
+
+       fn opt_anchors(&self) -> bool {
+               self.opt_anchors.is_some()
+       }
 }
 
 impl_writeable_tlv_based!(CounterpartyReceivedHTLCOutput, {
        (0, per_commitment_point, required),
+       (1, opt_anchors, option),
        (2, counterparty_delayed_payment_base_key, required),
        (4, counterparty_htlc_base_key, required),
        (6, htlc, required),
@@ -234,7 +250,7 @@ impl_writeable_tlv_based!(CounterpartyReceivedHTLCOutput, {
 ///
 /// Either offered or received, the amount is always used as part of the bip143 sighash.
 /// The preimage is only included as part of the witness in the former case.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) struct HolderHTLCOutput {
        preimage: Option<PaymentPreimage>,
        amount: u64,
@@ -269,28 +285,39 @@ impl_writeable_tlv_based!(HolderHTLCOutput, {
 /// A struct to describe the channel output on the funding transaction.
 ///
 /// witnessScript is used as part of the witness redeeming the funding utxo.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) struct HolderFundingOutput {
        funding_redeemscript: Script,
+       funding_amount: Option<u64>,
+       opt_anchors: Option<()>,
 }
 
+
 impl HolderFundingOutput {
-       pub(crate) fn build(funding_redeemscript: Script) -> Self {
+       pub(crate) fn build(funding_redeemscript: Script, funding_amount: u64, opt_anchors: bool) -> Self {
                HolderFundingOutput {
                        funding_redeemscript,
+                       funding_amount: Some(funding_amount),
+                       opt_anchors: if opt_anchors { Some(()) } else { None },
                }
        }
+
+       fn opt_anchors(&self) -> bool {
+               self.opt_anchors.is_some()
+       }
 }
 
 impl_writeable_tlv_based!(HolderFundingOutput, {
        (0, funding_redeemscript, required),
+       (1, opt_anchors, option),
+       (3, funding_amount, option),
 });
 
 /// A wrapper encapsulating all in-protocol differing outputs types.
 ///
 /// The generic API offers access to an output's common attributes or allows transformations such as
 /// finalizing an input claiming the output.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) enum PackageSolvingData {
        RevokedOutput(RevokedOutput),
        RevokedHTLCOutput(RevokedHTLCOutput),
@@ -303,24 +330,27 @@ pub(crate) enum PackageSolvingData {
 impl PackageSolvingData {
        fn amount(&self) -> u64 {
                let amt = match self {
-                       PackageSolvingData::RevokedOutput(ref outp) => { outp.amount },
-                       PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.amount },
-                       PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
-                       PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { outp.htlc.amount_msat / 1000 },
+                       PackageSolvingData::RevokedOutput(ref outp) => outp.amount,
+                       PackageSolvingData::RevokedHTLCOutput(ref outp) => outp.amount,
+                       PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => outp.htlc.amount_msat / 1000,
+                       PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => outp.htlc.amount_msat / 1000,
                        // Note: Currently, amounts of holder outputs spending witnesses aren't used
                        // as we can't malleate spending package to increase their feerate. This
                        // should change with the remaining anchor output patchset.
-                       PackageSolvingData::HolderHTLCOutput(..) => { unreachable!() },
-                       PackageSolvingData::HolderFundingOutput(..) => { unreachable!() },
+                       PackageSolvingData::HolderHTLCOutput(..) => unreachable!(),
+                       PackageSolvingData::HolderFundingOutput(ref outp) => {
+                               debug_assert!(outp.opt_anchors());
+                               outp.funding_amount.unwrap()
+                       }
                };
                amt
        }
-       fn weight(&self, opt_anchors: bool) -> usize {
+       fn weight(&self) -> usize {
                let weight = match self {
                        PackageSolvingData::RevokedOutput(ref outp) => { outp.weight as usize },
                        PackageSolvingData::RevokedHTLCOutput(ref outp) => { outp.weight as usize },
-                       PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { weight_offered_htlc(opt_anchors) as usize },
-                       PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { weight_received_htlc(opt_anchors) as usize },
+                       PackageSolvingData::CounterpartyOfferedHTLCOutput(ref outp) => { weight_offered_htlc(outp.opt_anchors()) as usize },
+                       PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => { weight_received_htlc(outp.opt_anchors()) as usize },
                        // Note: Currently, weights of holder outputs spending witnesses aren't used
                        // as we can't malleate spending package to increase their feerate. This
                        // should change with the remaining anchor output patchset.
@@ -444,7 +474,7 @@ impl_writeable_tlv_based_enum!(PackageSolvingData, ;
 /// A malleable package might be aggregated with other packages to save on fees.
 /// An untractable package has been counter-signed; aggregating it would break cached
 /// counterparty signatures.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) enum PackageMalleability {
        Malleable,
        Untractable,
@@ -459,7 +489,7 @@ pub(crate) enum PackageMalleability {
 ///
 /// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals.
 /// Failing to confirm a package translates into a loss of funds for the user.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub struct PackageTemplate {
        // List of onchain outputs and solving data to generate satisfying witnesses.
        inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>,
@@ -520,6 +550,9 @@ impl PackageTemplate {
        pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> {
                self.inputs.iter().map(|(o, _)| o).collect()
        }
+       pub(crate) fn inputs(&self) -> impl ExactSizeIterator<Item = &PackageSolvingData> {
+               self.inputs.iter().map(|(_, i)| i)
+       }
        pub(crate) fn split_package(&mut self, split_outp: &BitcoinOutPoint) -> Option<PackageTemplate> {
                match self.malleability {
                        PackageMalleability::Malleable => {
@@ -583,7 +616,7 @@ impl PackageTemplate {
        }
        /// Gets the amount of all outputs being spent by this package, only valid for malleable
        /// packages.
-       fn package_amount(&self) -> u64 {
+       pub(crate) fn package_amount(&self) -> u64 {
                let mut amounts = 0;
                for (_, outp) in self.inputs.iter() {
                        amounts += outp.amount();
@@ -594,13 +627,13 @@ impl PackageTemplate {
                self.inputs.iter().map(|(_, outp)| outp.absolute_tx_timelock(self.height_original))
                        .max().expect("There must always be at least one output to spend in a PackageTemplate")
        }
-       pub(crate) fn package_weight(&self, destination_script: &Script, opt_anchors: bool) -> usize {
+       pub(crate) fn package_weight(&self, destination_script: &Script) -> usize {
                let mut inputs_weight = 0;
                let mut witnesses_weight = 2; // count segwit flags
                for (_, outp) in self.inputs.iter() {
                        // previous_out_point: 36 bytes ; var_int: 1 byte ; sequence: 4 bytes
                        inputs_weight += 41 * WITNESS_SCALE_FACTOR;
-                       witnesses_weight += outp.weight(opt_anchors);
+                       witnesses_weight += outp.weight();
                }
                // version: 4 bytes ; count_tx_in: 1 byte ; count_tx_out: 1 byte ; lock_time: 4 bytes
                let transaction_weight = 10 * WITNESS_SCALE_FACTOR;
@@ -608,47 +641,46 @@ impl PackageTemplate {
                let output_weight = (8 + 1 + destination_script.len()) * WITNESS_SCALE_FACTOR;
                inputs_weight + witnesses_weight + transaction_weight + output_weight
        }
-       pub(crate) fn finalize_package<L: Deref, Signer: Sign>(&self, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64, destination_script: Script, logger: &L) -> Option<Transaction>
-               where L::Target: Logger,
-       {
-               match self.malleability {
-                       PackageMalleability::Malleable => {
-                               let mut bumped_tx = Transaction {
-                                       version: 2,
-                                       lock_time: PackedLockTime::ZERO,
-                                       input: vec![],
-                                       output: vec![TxOut {
-                                               script_pubkey: destination_script,
-                                               value,
-                                       }],
-                               };
-                               for (outpoint, _) in self.inputs.iter() {
-                                       bumped_tx.input.push(TxIn {
-                                               previous_output: *outpoint,
-                                               script_sig: Script::new(),
-                                               sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
-                                               witness: Witness::new(),
-                                       });
-                               }
-                               for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
-                                       log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
-                                       if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
-                               }
-                               log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
-                               return Some(bumped_tx);
-                       },
-                       PackageMalleability::Untractable => {
-                               debug_assert_eq!(value, 0, "value is ignored for non-malleable packages, should be zero to ensure callsites are correct");
-                               if let Some((outpoint, outp)) = self.inputs.first() {
-                                       if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
-                                               log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
-                                               log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
-                                               return Some(final_tx);
-                                       }
-                                       return None;
-                               } else { panic!("API Error: Package must not be inputs empty"); }
-                       },
+       pub(crate) fn finalize_malleable_package<L: Deref, Signer: Sign>(
+               &self, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64, destination_script: Script, logger: &L
+       ) -> Option<Transaction> where L::Target: Logger {
+               debug_assert!(self.is_malleable());
+               let mut bumped_tx = Transaction {
+                       version: 2,
+                       lock_time: PackedLockTime::ZERO,
+                       input: vec![],
+                       output: vec![TxOut {
+                               script_pubkey: destination_script,
+                               value,
+                       }],
+               };
+               for (outpoint, _) in self.inputs.iter() {
+                       bumped_tx.input.push(TxIn {
+                               previous_output: *outpoint,
+                               script_sig: Script::new(),
+                               sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+                               witness: Witness::new(),
+                       });
+               }
+               for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
+                       log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+                       if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
                }
+               log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
+               Some(bumped_tx)
+       }
+       pub(crate) fn finalize_untractable_package<L: Deref, Signer: Sign>(
+               &self, onchain_handler: &mut OnchainTxHandler<Signer>, logger: &L,
+       ) -> Option<Transaction> where L::Target: Logger {
+               debug_assert!(!self.is_malleable());
+               if let Some((outpoint, outp)) = self.inputs.first() {
+                       if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
+                               log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+                               log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
+                               return Some(final_tx);
+                       }
+                       return None;
+               } else { panic!("API Error: Package must not be inputs empty"); }
        }
        /// In LN, claimed outputs are time-sensitive, which means we have to spend them before some timelock expires. At in-channel
        /// output detection, we generate a first version of a claim tx and associate a height timer with it. A height timer is an absolute block
@@ -686,14 +718,45 @@ impl PackageTemplate {
                }
                None
        }
+
+       #[cfg(anchors)]
+       /// Computes a feerate based on the given confirmation target. If a previous feerate was used,
+       /// and the new feerate is below it, we'll use a 25% increase of the previous feerate instead of
+       /// the new one.
+       pub(crate) fn compute_package_feerate<F: Deref>(
+               &self, fee_estimator: &LowerBoundedFeeEstimator<F>, conf_target: ConfirmationTarget,
+       ) -> u32 where F::Target: FeeEstimator {
+               let feerate_estimate = fee_estimator.bounded_sat_per_1000_weight(conf_target);
+               if self.feerate_previous != 0 {
+                       // If the previous feerate is below the current estimate from the fee estimator, use the new estimate to compute the fee...
+                       if feerate_estimate as u64 > self.feerate_previous {
+                               feerate_estimate
+                       } else {
+                               // ...else just increase the previous feerate by 25% (because that's a nice number)
+                               (self.feerate_previous + (self.feerate_previous / 4)).try_into().unwrap_or(u32::max_value())
+                       }
+               } else {
+                       feerate_estimate
+               }
+       }
+
+       /// Determines whether a package contains an input which must have additional external inputs
+       /// attached to help the spending transaction reach confirmation.
+       pub(crate) fn requires_external_funding(&self) -> bool {
+               self.inputs.iter().find(|input| match input.1 {
+                       PackageSolvingData::HolderFundingOutput(ref outp) => outp.opt_anchors(),
+                       _ => false,
+               }).is_some()
+       }
+
        pub (crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, aggregable: bool, height_original: u32) -> Self {
                let malleability = match input_solving_data {
-                       PackageSolvingData::RevokedOutput(..) => { PackageMalleability::Malleable },
-                       PackageSolvingData::RevokedHTLCOutput(..) => { PackageMalleability::Malleable },
-                       PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { PackageMalleability::Malleable },
-                       PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { PackageMalleability::Malleable },
-                       PackageSolvingData::HolderHTLCOutput(..) => { PackageMalleability::Untractable },
-                       PackageSolvingData::HolderFundingOutput(..) => { PackageMalleability::Untractable },
+                       PackageSolvingData::RevokedOutput(..) => PackageMalleability::Malleable,
+                       PackageSolvingData::RevokedHTLCOutput(..) => PackageMalleability::Malleable,
+                       PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => PackageMalleability::Malleable,
+                       PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => PackageMalleability::Malleable,
+                       PackageSolvingData::HolderHTLCOutput(..) => PackageMalleability::Untractable,
+                       PackageSolvingData::HolderFundingOutput(..) => PackageMalleability::Untractable,
                };
                let mut inputs = Vec::with_capacity(1);
                inputs.push((BitcoinOutPoint { txid, vout }, input_solving_data));
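
To make the bump rule in `compute_package_feerate` concrete, here is a standalone sketch of the same branch logic with a few illustrative values (sat per 1000 weight; the numbers are examples, not taken from the code):

    use std::convert::TryInto;

    // Mirrors the branches above: prefer the estimator's value, but never fall
    // below the previously used feerate; instead bump the previous one by 25%.
    fn bumped_feerate(feerate_previous: u64, feerate_estimate: u32) -> u32 {
        if feerate_previous != 0 && (feerate_estimate as u64) <= feerate_previous {
            (feerate_previous + feerate_previous / 4).try_into().unwrap_or(u32::max_value())
        } else {
            feerate_estimate
        }
    }

    // bumped_feerate(0, 1_000)     == 1_000  (first attempt: just take the estimate)
    // bumped_feerate(1_000, 2_000) == 2_000  (estimate rose: use it)
    // bumped_feerate(2_000, 1_500) == 2_500  (estimate fell: 2_000 + 25%)
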
@@ -873,26 +936,26 @@ mod tests {
        }
 
        macro_rules! dumb_counterparty_output {
-               ($secp_ctx: expr, $amt: expr) => {
+               ($secp_ctx: expr, $amt: expr, $opt_anchors: expr) => {
                        {
                                let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
                                let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar);
                                let hash = PaymentHash([1; 32]);
                                let htlc = HTLCOutputInCommitment { offered: true, amount_msat: $amt, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None };
-                               PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, dumb_point, dumb_point, htlc))
+                               PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, dumb_point, dumb_point, htlc, $opt_anchors))
                        }
                }
        }
 
        macro_rules! dumb_counterparty_offered_output {
-               ($secp_ctx: expr, $amt: expr) => {
+               ($secp_ctx: expr, $amt: expr, $opt_anchors: expr) => {
                        {
                                let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
                                let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar);
                                let hash = PaymentHash([1; 32]);
                                let preimage = PaymentPreimage([2;32]);
                                let htlc = HTLCOutputInCommitment { offered: false, amount_msat: $amt, cltv_expiry: 1000, payment_hash: hash, transaction_output_index: None };
-                               PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, dumb_point, dumb_point, preimage, htlc))
+                               PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, dumb_point, dumb_point, preimage, htlc, $opt_anchors))
                        }
                }
        }
@@ -987,7 +1050,7 @@ mod tests {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
                let revk_outp = dumb_revk_output!(secp_ctx);
-               let counterparty_outp = dumb_counterparty_output!(secp_ctx, 0);
+               let counterparty_outp = dumb_counterparty_output!(secp_ctx, 0, false);
 
                let mut revoked_package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
                let counterparty_package = PackageTemplate::build_package(txid, 1, counterparty_outp, 1000, true, 100);
@@ -1051,7 +1114,7 @@ mod tests {
        fn test_package_amounts() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000);
+               let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000, false);
 
                let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
                assert_eq!(package.package_amount(), 1000);
@@ -1068,24 +1131,22 @@ mod tests {
                {
                        let revk_outp = dumb_revk_output!(secp_ctx);
                        let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, true, 100);
-                       for &opt_anchors in [false, true].iter() {
-                               assert_eq!(package.package_weight(&Script::new(), opt_anchors),  weight_sans_output + WEIGHT_REVOKED_OUTPUT as usize);
-                       }
+                       assert_eq!(package.package_weight(&Script::new()),  weight_sans_output + WEIGHT_REVOKED_OUTPUT as usize);
                }
 
                {
-                       let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000);
-                       let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
                        for &opt_anchors in [false, true].iter() {
-                               assert_eq!(package.package_weight(&Script::new(), opt_anchors), weight_sans_output + weight_received_htlc(opt_anchors) as usize);
+                               let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000, opt_anchors);
+                               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+                               assert_eq!(package.package_weight(&Script::new()), weight_sans_output + weight_received_htlc(opt_anchors) as usize);
                        }
                }
 
                {
-                       let counterparty_outp = dumb_counterparty_offered_output!(secp_ctx, 1_000_000);
-                       let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
                        for &opt_anchors in [false, true].iter() {
-                               assert_eq!(package.package_weight(&Script::new(), opt_anchors), weight_sans_output + weight_offered_htlc(opt_anchors) as usize);
+                               let counterparty_outp = dumb_counterparty_offered_output!(secp_ctx, 1_000_000, opt_anchors);
+                               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+                               assert_eq!(package.package_weight(&Script::new()), weight_sans_output + weight_offered_htlc(opt_anchors) as usize);
                        }
                }
        }
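
The expected values in the weight tests above follow directly from the accounting in `package_weight`: each claim pays a fixed, witness-independent cost plus the witness weight of every input. A self-contained sketch of that fixed cost for a one-input claim (`WITNESS_SCALE_FACTOR` is redefined here only to keep the sketch standalone; rust-bitcoin exposes the same value):

    const WITNESS_SCALE_FACTOR: usize = 4;

    // Witness-independent weight of a single-input claim, matching the sums in
    // `package_weight`. With an empty destination script this comes to 242 weight
    // units; each test then adds the witness weight of the particular input type.
    fn fixed_claim_weight(destination_script_len: usize) -> usize {
        let inputs_weight = 41 * WITNESS_SCALE_FACTOR;      // previous_out_point + var_int + sequence
        let segwit_flags = 2;                               // segwit marker + flag
        let transaction_weight = 10 * WITNESS_SCALE_FACTOR; // version + in/out counts + lock_time
        let output_weight = (8 + 1 + destination_script_len) * WITNESS_SCALE_FACTOR;
        inputs_weight + segwit_flags + transaction_weight + output_weight
    }
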
index df0378938b42eeb0b1ff0fbd1413f6f140617a12..15bc0d0e23e08079751972f32d751a76dc4706d7 100644 (file)
@@ -65,7 +65,7 @@ pub fn htlc_timeout_tx_weight(opt_anchors: bool) -> u64 {
        if opt_anchors { HTLC_TIMEOUT_ANCHOR_TX_WEIGHT } else { HTLC_TIMEOUT_TX_WEIGHT }
 }
 
-#[derive(PartialEq)]
+#[derive(PartialEq, Eq)]
 pub(crate) enum HTLCClaim {
        OfferedTimeout,
        OfferedPreimage,
@@ -208,6 +208,7 @@ pub struct CounterpartyCommitmentSecrets {
        old_secrets: [([u8; 32], u64); 49],
 }
 
+impl Eq for CounterpartyCommitmentSecrets {}
 impl PartialEq for CounterpartyCommitmentSecrets {
        fn eq(&self, other: &Self) -> bool {
                for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
@@ -419,7 +420,7 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
 /// channel basepoints via the new function, or they were obtained via
 /// CommitmentTransaction.trust().keys() because we trusted the source of the
 /// pre-calculated keys.
-#[derive(PartialEq, Clone)]
+#[derive(PartialEq, Eq, Clone)]
 pub struct TxCreationKeys {
        /// The broadcaster's per-commitment public key which was used to derive the other keys.
        pub per_commitment_point: PublicKey,
@@ -444,7 +445,7 @@ impl_writeable_tlv_based!(TxCreationKeys, {
 });
 
 /// One counterparty's public keys which do not change over the life of a channel.
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub struct ChannelPublicKeys {
        /// The public key which is used to sign all commitment transactions, as it appears in the
        /// on-chain channel lock-in 2-of-2 multisig output.
@@ -525,8 +526,8 @@ pub fn get_revokeable_redeemscript(revocation_key: &PublicKey, contest_delay: u1
        res
 }
 
-#[derive(Clone, PartialEq)]
 /// Information about an HTLC as it appears in a commitment transaction
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct HTLCOutputInCommitment {
        /// Whether the HTLC was "offered" (ie outbound in relation to this commitment transaction).
        /// Note that this is not the same as whether it is outbound *from us*. To determine that you
@@ -726,6 +727,23 @@ pub fn get_anchor_redeemscript(funding_pubkey: &PublicKey) -> Script {
                .into_script()
 }
 
+#[cfg(anchors)]
+/// Locates the output with an anchor script paying to `funding_pubkey` within `commitment_tx`.
+pub(crate) fn get_anchor_output<'a>(commitment_tx: &'a Transaction, funding_pubkey: &PublicKey) -> Option<(u32, &'a TxOut)> {
+       let anchor_script = chan_utils::get_anchor_redeemscript(funding_pubkey).to_v0_p2wsh();
+       commitment_tx.output.iter().enumerate()
+               .find(|(_, txout)| txout.script_pubkey == anchor_script)
+               .map(|(idx, txout)| (idx as u32, txout))
+}
+
+/// Returns the witness required to satisfy and spend an anchor input.
+pub fn build_anchor_input_witness(funding_key: &PublicKey, funding_sig: &Signature) -> Witness {
+       let anchor_redeem_script = chan_utils::get_anchor_redeemscript(funding_key);
+       let mut funding_sig = funding_sig.serialize_der().to_vec();
+       funding_sig.push(EcdsaSighashType::All as u8);
+       Witness::from_vec(vec![funding_sig, anchor_redeem_script.to_bytes()])
+}
+
 /// Per-channel data used to build transactions in conjunction with the per-commitment data (CommitmentTransaction).
 /// The fields are organized by holder/counterparty.
 ///
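
For reference, the witness stack assembled by `build_anchor_input_witness` above is simply `[<funding_sig || SIGHASH_ALL>, <anchor redeemscript>]`. A standalone sketch of the same construction using the rust-bitcoin types this change already relies on (paths assumed to match the crate versions in this tree):

    use bitcoin::Witness;
    use bitcoin::secp256k1::ecdsa::Signature;

    // 0x01 is SIGHASH_ALL, the byte `EcdsaSighashType::All as u8` evaluates to above.
    const SIGHASH_ALL_BYTE: u8 = 0x01;

    // DER-encoded funding signature with the sighash byte appended, followed by
    // the raw bytes of the anchor redeem script.
    fn anchor_witness(funding_sig: &Signature, anchor_redeemscript: Vec<u8>) -> Witness {
        let mut sig = funding_sig.serialize_der().to_vec();
        sig.push(SIGHASH_ALL_BYTE);
        Witness::from_vec(vec![sig, anchor_redeemscript])
    }
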
@@ -882,6 +900,7 @@ impl Deref for HolderCommitmentTransaction {
        fn deref(&self) -> &Self::Target { &self.inner }
 }
 
+impl Eq for HolderCommitmentTransaction {}
 impl PartialEq for HolderCommitmentTransaction {
        // We don't care whether we are signed in equality comparison
        fn eq(&self, o: &Self) -> bool {
@@ -1007,7 +1026,7 @@ impl BuiltCommitmentTransaction {
 ///
 /// This class can be used inside a signer implementation to generate a signature given the relevant
 /// secret key.
-#[derive(Clone, Hash, PartialEq)]
+#[derive(Clone, Hash, PartialEq, Eq)]
 pub struct ClosingTransaction {
        to_holder_value_sat: u64,
        to_counterparty_value_sat: u64,
@@ -1147,6 +1166,7 @@ pub struct CommitmentTransaction {
        built: BuiltCommitmentTransaction,
 }
 
+impl Eq for CommitmentTransaction {}
 impl PartialEq for CommitmentTransaction {
        fn eq(&self, o: &Self) -> bool {
                let eq = self.commitment_number == o.commitment_number &&
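
The bare `impl Eq for ... {}` blocks added in this file (and for `OnionPacket`, `NetworkGraph`, and others elsewhere in this change) are the standard way to opt a type with a hand-written `PartialEq` into `Eq`: `Eq` has no methods of its own, it only promises that the equality relation is total, so the impl body stays empty. A tiny sketch with a hypothetical type:

    #[derive(Clone)]
    struct Wrapper(u64);

    impl PartialEq for Wrapper {
        fn eq(&self, other: &Self) -> bool { self.0 == other.0 }
    }
    // `Eq` is a marker trait: no methods, just the guarantee that `eq` behaves as a
    // total equivalence relation, which in turn lets the type be used where `Eq`
    // is required (e.g. as a hash-map key together with `Hash`).
    impl Eq for Wrapper {}
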
index f234ad9f91f555075804d87db115d98193f09619..95771449ba47b1cf5e2001bf9e17b90b88776690 100644 (file)
@@ -1971,7 +1971,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
-                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
                if let Some((funding_txo, monitor_update)) = monitor_update_option {
@@ -3862,7 +3862,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                }
                                                        } else {
                                                                events::Event::ProbeFailed {
-                                                                       payment_id: payment_id,
+                                                                       payment_id,
                                                                        payment_hash: payment_hash.clone(),
                                                                        path: path.clone(),
                                                                        short_channel_id,
@@ -3909,7 +3909,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                                if self.payment_is_probe(payment_hash, &payment_id) {
                                                        events::Event::ProbeFailed {
-                                                               payment_id: payment_id,
+                                                               payment_id,
                                                                payment_hash: payment_hash.clone(),
                                                                path: path.clone(),
                                                                short_channel_id: Some(scid),
@@ -6499,7 +6499,7 @@ impl Readable for HTLCSource {
                                }
                                Ok(HTLCSource::OutboundRoute {
                                        session_priv: session_priv.0.unwrap(),
-                                       first_hop_htlc_msat: first_hop_htlc_msat,
+                                       first_hop_htlc_msat,
                                        path: path.unwrap(),
                                        payment_id: payment_id.unwrap(),
                                        payment_secret,
index 43dfef5d188bc51d8d0aa65df6965ef9ccaf7b1a..c3a57791c08b079840ea704989e4d1a616969f32 100644 (file)
@@ -1126,7 +1126,7 @@ impl SendEvent {
                assert!(updates.update_fail_htlcs.is_empty());
                assert!(updates.update_fail_malformed_htlcs.is_empty());
                assert!(updates.update_fee.is_none());
-               SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
+               SendEvent { node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
        }
 
        pub fn from_event(event: MessageSendEvent) -> SendEvent {
index 4c103e1199dfdf3c05d41c2ca3aa1d60de5fd384..6295fb705ae7712ddc60ee6c65ce1df15b6d4d36 100644 (file)
@@ -50,7 +50,7 @@ use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 pub(crate) const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;
 
 /// An error in decoding a message or struct.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum DecodeError {
        /// A version byte specified something we don't know how to handle.
        /// Includes unknown realm byte in an OnionHopData packet
@@ -73,7 +73,7 @@ pub enum DecodeError {
 }
 
 /// An init message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Init {
        /// The relevant features which the sender supports
        pub features: InitFeatures,
@@ -85,7 +85,7 @@ pub struct Init {
 }
 
 /// An error message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ErrorMessage {
        /// The channel ID involved in the error.
        ///
@@ -100,7 +100,7 @@ pub struct ErrorMessage {
 }
 
 /// A warning message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct WarningMessage {
        /// The channel ID involved in the warning.
        ///
@@ -114,7 +114,7 @@ pub struct WarningMessage {
 }
 
 /// A ping message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Ping {
        /// The desired response length
        pub ponglen: u16,
@@ -124,7 +124,7 @@ pub struct Ping {
 }
 
 /// A pong message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Pong {
        /// The pong packet size.
        /// This field is not sent on the wire. byteslen zeros are sent.
@@ -132,7 +132,7 @@ pub struct Pong {
 }
 
 /// An open_channel message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct OpenChannel {
        /// The genesis hash of the blockchain where the channel is to be opened
        pub chain_hash: BlockHash,
@@ -179,7 +179,7 @@ pub struct OpenChannel {
 }
 
 /// An accept_channel message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AcceptChannel {
        /// A temporary channel ID, until the funding outpoint is announced
        pub temporary_channel_id: [u8; 32],
@@ -220,7 +220,7 @@ pub struct AcceptChannel {
 }
 
 /// A funding_created message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FundingCreated {
        /// A temporary channel ID, until the funding is established
        pub temporary_channel_id: [u8; 32],
@@ -233,7 +233,7 @@ pub struct FundingCreated {
 }
 
 /// A funding_signed message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FundingSigned {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -242,7 +242,7 @@ pub struct FundingSigned {
 }
 
 /// A channel_ready message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelReady {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -254,7 +254,7 @@ pub struct ChannelReady {
 }
 
 /// A shutdown message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Shutdown {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -266,7 +266,7 @@ pub struct Shutdown {
 /// The minimum and maximum fees which the sender is willing to place on the closing transaction.
 /// This is provided in [`ClosingSigned`] by both sides to indicate the fee range they are willing
 /// to use.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClosingSignedFeeRange {
        /// The minimum absolute fee, in satoshis, which the sender is willing to place on the closing
        /// transaction.
@@ -277,7 +277,7 @@ pub struct ClosingSignedFeeRange {
 }
 
 /// A closing_signed message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClosingSigned {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -291,7 +291,7 @@ pub struct ClosingSigned {
 }
 
 /// An update_add_htlc message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateAddHTLC {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -307,7 +307,7 @@ pub struct UpdateAddHTLC {
 }
 
  /// An onion message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct OnionMessage {
        /// Used in decrypting the onion packet's payload.
        pub blinding_point: PublicKey,
@@ -315,7 +315,7 @@ pub struct OnionMessage {
 }
 
 /// An update_fulfill_htlc message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFulfillHTLC {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -326,7 +326,7 @@ pub struct UpdateFulfillHTLC {
 }
 
 /// An update_fail_htlc message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFailHTLC {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -336,7 +336,7 @@ pub struct UpdateFailHTLC {
 }
 
 /// An update_fail_malformed_htlc message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFailMalformedHTLC {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -348,7 +348,7 @@ pub struct UpdateFailMalformedHTLC {
 }
 
 /// A commitment_signed message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct CommitmentSigned {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -359,7 +359,7 @@ pub struct CommitmentSigned {
 }
 
 /// A revoke_and_ack message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct RevokeAndACK {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -370,7 +370,7 @@ pub struct RevokeAndACK {
 }
 
 /// An update_fee message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFee {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -378,7 +378,7 @@ pub struct UpdateFee {
        pub feerate_per_kw: u32,
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// Proof that the sender knows the per-commitment secret of the previous commitment transaction.
 /// This is used to convince the recipient that the channel is at a certain commitment
 /// number even if they lost that data due to a local failure.  Of course, the peer may lie
@@ -392,7 +392,7 @@ pub struct DataLossProtect {
 }
 
 /// A channel_reestablish message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelReestablish {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -405,7 +405,7 @@ pub struct ChannelReestablish {
 }
 
 /// An announcement_signatures message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AnnouncementSignatures {
        /// The channel ID
        pub channel_id: [u8; 32],
@@ -418,7 +418,7 @@ pub struct AnnouncementSignatures {
 }
 
 /// An address which can be used to connect to a remote peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum NetAddress {
        /// An IPv4 address/port on which the peer is listening.
        IPv4 {
@@ -573,7 +573,7 @@ impl Readable for NetAddress {
 
 
 /// The unsigned part of a node_announcement
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UnsignedNodeAnnouncement {
        /// The advertised features
        pub features: NodeFeatures,
@@ -592,7 +592,7 @@ pub struct UnsignedNodeAnnouncement {
        pub(crate) excess_address_data: Vec<u8>,
        pub(crate) excess_data: Vec<u8>,
 }
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// A node_announcement message to be sent or received from a peer
 pub struct NodeAnnouncement {
        /// The signature by the node key
@@ -602,7 +602,7 @@ pub struct NodeAnnouncement {
 }
 
 /// The unsigned part of a channel_announcement
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UnsignedChannelAnnouncement {
        /// The advertised channel features
        pub features: ChannelFeatures,
@@ -621,7 +621,7 @@ pub struct UnsignedChannelAnnouncement {
        pub(crate) excess_data: Vec<u8>,
 }
 /// A channel_announcement message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelAnnouncement {
        /// Authentication of the announcement by the first public node
        pub node_signature_1: Signature,
@@ -636,7 +636,7 @@ pub struct ChannelAnnouncement {
 }
 
 /// The unsigned part of a channel_update
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UnsignedChannelUpdate {
        /// The genesis hash of the blockchain where the channel is to be opened
        pub chain_hash: BlockHash,
@@ -669,7 +669,7 @@ pub struct UnsignedChannelUpdate {
        pub excess_data: Vec<u8>,
 }
 /// A channel_update message to be sent or received from a peer
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelUpdate {
        /// A signature of the channel update
        pub signature: Signature,
@@ -681,7 +681,7 @@ pub struct ChannelUpdate {
 /// UTXOs in a range of blocks. The recipient of a query makes a best
 /// effort to reply to the query using one or more reply_channel_range
 /// messages.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct QueryChannelRange {
        /// The genesis hash of the blockchain being queried
        pub chain_hash: BlockHash,
@@ -698,7 +698,7 @@ pub struct QueryChannelRange {
 /// not be a perfect view of the network. The short_channel_ids in the
 /// reply are encoded. We only support encoding_type=0 uncompressed
 /// serialization and do not support encoding_type=1 zlib serialization.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ReplyChannelRange {
        /// The genesis hash of the blockchain being queried
        pub chain_hash: BlockHash,
@@ -720,7 +720,7 @@ pub struct ReplyChannelRange {
 /// reply_short_channel_ids_end message. The short_channel_ids sent in
 /// this query are encoded. We only support encoding_type=0 uncompressed
 /// serialization and do not support encoding_type=1 zlib serialization.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct QueryShortChannelIds {
        /// The genesis hash of the blockchain being queried
        pub chain_hash: BlockHash,
@@ -732,7 +732,7 @@ pub struct QueryShortChannelIds {
 /// query_short_channel_ids message. The query recipient makes a best
 /// effort to respond based on their local network view which may not be
 /// a perfect view of the network.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ReplyShortChannelIdsEnd {
        /// The genesis hash of the blockchain that was queried
        pub chain_hash: BlockHash,
@@ -744,7 +744,7 @@ pub struct ReplyShortChannelIdsEnd {
 /// A gossip_timestamp_filter message is used by a node to request
 /// gossip relay for messages in the requested time range when the
 /// gossip_queries feature has been negotiated.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct GossipTimestampFilter {
        /// The genesis hash of the blockchain for channel and node information
        pub chain_hash: BlockHash,
@@ -805,7 +805,7 @@ pub struct LightningError {
 
 /// Struct used to return values from revoke_and_ack messages, containing a bunch of commitment
 /// transaction updates if they were pending.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct CommitmentUpdate {
        /// update_add_htlc messages which should be sent
        pub update_add_htlcs: Vec<UpdateAddHTLC>,
@@ -826,7 +826,7 @@ pub struct CommitmentUpdate {
 /// OptionalField simply gets Present if there are enough bytes to read into it), we have a
 /// separate enum type for them.
 /// (C-not exported) due to a free generic in T
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum OptionalField<T> {
        /// Optional field is included in message
        Present(T),
@@ -1083,6 +1083,7 @@ impl onion_utils::Packet for OnionPacket {
        }
 }
 
+impl Eq for OnionPacket { }
 impl PartialEq for OnionPacket {
        fn eq(&self, other: &OnionPacket) -> bool {
                for (i, j) in self.hop_data.iter().zip(other.hop_data.iter()) {
@@ -1100,7 +1101,7 @@ impl fmt::Debug for OnionPacket {
        }
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) struct OnionErrorPacket {
        // This really should be a constant size slice, but the spec lets these things be up to 128KB?
        // (TODO) We limit it in decode to much lower...
index 09363b6b51749f033bb482db1ac7b50b28ffaaf2..7b7125891dd08819d821c8f8d2c434076aa63b6b 100644 (file)
@@ -18,7 +18,7 @@ use io;
 /// A script pubkey for shutting down a channel as defined by [BOLT #2].
 ///
 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub struct ShutdownScript(ShutdownScriptImpl);
 
 /// An error occurring when converting from [`Script`] to [`ShutdownScript`].
@@ -30,7 +30,7 @@ pub struct InvalidShutdownScript {
        pub script: Script
 }
 
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 enum ShutdownScriptImpl {
        /// [`PublicKey`] used to form a P2WPKH script pubkey. Used to support backward-compatible
        /// serialization.
index e2409fc45d6530c22e9aa62c7061335204f6a81f..3677efda420cc8663914512ea38c732216de8f59 100644 (file)
@@ -114,7 +114,7 @@ impl Destination {
 /// Errors that may occur when [sending an onion message].
 ///
 /// [sending an onion message]: OnionMessenger::send_onion_message
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum SendError {
        /// Errored computing onion message packet keys.
        Secp256k1(secp256k1::Error),
index 1337bdb14d5d6c3bf83fcadc5beebcea8b6d70d8..20b1fb0b82fdf5cf0c0b16096f524110d20ba6da 100644 (file)
@@ -27,7 +27,7 @@ use prelude::*;
 pub(super) const SMALL_PACKET_HOP_DATA_LEN: usize = 1300;
 pub(super) const BIG_PACKET_HOP_DATA_LEN: usize = 32768;
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) struct Packet {
        pub(super) version: u8,
        pub(super) public_key: PublicKey,
index 26f68e27e583c0d69a54d8538cc4941b93ffd52f..47d7dab6d1132121a5da3bacf2e57cb52befb2ab 100644 (file)
@@ -164,7 +164,7 @@ pub struct ReadOnlyNetworkGraph<'a> {
 /// return packet by a node along the route. See [BOLT #4] for details.
 ///
 /// [BOLT #4]: https://github.com/lightning/bolts/blob/master/04-onion-routing.md
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum NetworkUpdate {
 /// An error indicating a `channel_update` message should be applied via
        /// [`NetworkGraph::update_channel`].
@@ -626,7 +626,7 @@ where
        }
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// Details about one direction of a channel as received within a [`ChannelUpdate`].
 pub struct ChannelUpdateInfo {
        /// When the last update to the channel direction was issued.
@@ -709,7 +709,7 @@ impl Readable for ChannelUpdateInfo {
        }
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// Details about a channel (both directions).
 /// Received within a channel announcement.
 pub struct ChannelInfo {
@@ -1017,7 +1017,7 @@ impl_writeable_tlv_based!(RoutingFees, {
        (2, proportional_millionths, required)
 });
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// Information received in the latest node_announcement from this node.
 pub struct NodeAnnouncementInfo {
        /// Protocol features the node announced support for
@@ -1053,7 +1053,7 @@ impl_writeable_tlv_based!(NodeAnnouncementInfo, {
 ///
 /// Since node aliases are provided by third parties, they are a potential avenue for injection
 /// attacks. Care must be taken when processing.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct NodeAlias(pub [u8; 32]);
 
 impl fmt::Display for NodeAlias {
@@ -1094,7 +1094,7 @@ impl Readable for NodeAlias {
        }
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// Details about a node in the network, known from the network announcement.
 pub struct NodeInfo {
        /// All valid channels a node has announced
@@ -1247,6 +1247,7 @@ impl<L: Deref> fmt::Display for NetworkGraph<L> where L::Target: Logger {
        }
 }
 
+impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
 impl<L: Deref> PartialEq for NetworkGraph<L> where L::Target: Logger {
        fn eq(&self, other: &Self) -> bool {
                self.genesis_hash == other.genesis_hash &&
index efba350ce2743f816c39e6c20698a15c3e222454..170ac9c991c43b3f4b079a026bd388801a6ca12d 100644 (file)
@@ -1153,7 +1153,7 @@ where L::Target: Logger {
                                                                lowest_fee_to_peer_through_node: total_fee_msat,
                                                                lowest_fee_to_node: $next_hops_fee_msat as u64 + hop_use_fee_msat,
                                                                total_cltv_delta: hop_total_cltv_delta,
-                                                               value_contribution_msat: value_contribution_msat,
+                                                               value_contribution_msat,
                                                                path_htlc_minimum_msat,
                                                                path_penalty_msat,
                                                                path_length_to_node,
index d1195dc8d674b343fcb769f9490724e4a4c14d21..fefdebfccd30f8b38966e58b7650e81ba06ec080 100644 (file)
@@ -315,19 +315,28 @@ type ConfiguredTime = Eternity;
 
 /// [`Score`] implementation using channel success probability distributions.
 ///
-/// Based on *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
-/// and Stefan Richter [[1]]. Given the uncertainty of channel liquidity balances, probability
-/// distributions are defined based on knowledge learned from successful and unsuccessful attempts.
-/// Then the negative `log10` of the success probability is used to determine the cost of routing a
-/// specific HTLC amount through a channel.
+/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
+/// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
+/// When a payment is forwarded through a channel (but fails later in the route), we learn the
+/// lower-bound on the channel's available liquidity must be at least the value of the HTLC.
 ///
-/// Knowledge about channel liquidity balances takes the form of upper and lower bounds on the
-/// possible liquidity. Certainty of the bounds is decreased over time using a decay function. See
-/// [`ProbabilisticScoringParameters`] for details.
+/// These bounds are then used to determine a success probability using the formula from
+/// *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
+/// and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
 ///
-/// Since the scorer aims to learn the current channel liquidity balances, it works best for nodes
-/// with high payment volume or that actively probe the [`NetworkGraph`]. Nodes with low payment
-/// volume are more likely to experience failed payment paths, which would need to be retried.
+/// This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
+/// [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
+/// milli-satoshis. The penalties, when added across all hops, have the property of being linear in
+/// terms of the entire path's success probability. This allows the router to directly compare
+/// penalties for different paths. See the documentation of those parameters for the exact formulas.
+///
+/// The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
+///
+/// Further, we track the history of our upper and lower liquidity bounds for each channel,
+/// allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
+/// and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
+/// formula, but using the history of a channel rather than our latest estimates for the liquidity
+/// bounds.
 ///
 /// # Note
 ///
@@ -335,6 +344,11 @@ type ConfiguredTime = Eternity;
 /// behavior.
 ///
 /// [1]: https://arxiv.org/abs/2107.05322
+/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_multiplier_msat
+/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_offset_half_life`]: ProbabilisticScoringParameters::liquidity_offset_half_life
+/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_multiplier_msat
+/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_amount_multiplier_msat
 pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 
 /// Probabilistic [`Score`] implementation.
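
The scorer documentation above reduces to a small amount of arithmetic per hop. The following is a minimal, standalone sketch of that calculation (illustrative only, not LDK's implementation; the function name and the exact handling at the bounds are assumptions for the sketch):

    // Illustrative only: per-hop penalty from the current liquidity bounds, using the
    // success-probability formula (upper_bound - amount) / (upper_bound - lower_bound)
    // and the penalty formula -log10(success_probability) * liquidity_penalty_multiplier_msat.
    fn liquidity_penalty_msat(
        amount_msat: u64, lower_bound_msat: u64, upper_bound_msat: u64,
        liquidity_penalty_multiplier_msat: u64,
    ) -> u64 {
        if amount_msat <= lower_bound_msat { return 0; }                 // certain to succeed
        if amount_msat >= upper_bound_msat { return u64::max_value(); }  // certain to fail
        let success_probability = (upper_bound_msat - amount_msat) as f64
            / (upper_bound_msat - lower_bound_msat) as f64;
        (-success_probability.log10() * liquidity_penalty_multiplier_msat as f64) as u64
    }
    // e.g. with no knowledge of a 1_000_000_000 msat channel (bounds 0..capacity), sending
    // 500_000_000 msat gives P = 0.5 and a penalty of roughly 9_030 msat at the 30_000 default.
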
@@ -388,19 +402,27 @@ pub struct ProbabilisticScoringParameters {
        /// uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
        /// result in a `u64::max_value` penalty, however.
        ///
+       /// `-log10(success_probability) * liquidity_penalty_multiplier_msat`
+       ///
        /// Default value: 30,000 msat
        ///
        /// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
        pub liquidity_penalty_multiplier_msat: u64,
 
-       /// The time required to elapse before any knowledge learned about channel liquidity balances is
-       /// cut in half.
+       /// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
+       /// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
+       /// the available liquidity is halved and the upper-bound moves half-way to the channel's total
+       /// capacity.
+       ///
+       /// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
+       /// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
+       /// struct documentation for more info on the way the liquidity bounds are used.
        ///
-       /// The bounds are defined in terms of offsets and are initially zero. Increasing the offsets
-       /// gives tighter bounds on the channel liquidity balance. Thus, halving the offsets decreases
-       /// the certainty of the channel liquidity balance.
+       /// For example, if the channel's capacity is 1 million sats, and the current lower and upper
+       /// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the lower
+       /// and upper liquidity bounds will be decayed to 100,000 and 800,000 sats.
        ///
-       /// Default value: 1 hour
+       /// Default value: 6 hours
        ///
        /// # Note
        ///
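
The halving described above can be checked with a tiny sketch (illustrative only; LDK tracks these values as offsets from "no knowledge" and decays them internally, so the helper below is just a restatement of the worked example):

    // Illustrative only: after `half_lives` half-life periods, the lower bound is halved and
    // the upper bound moves half-way to the channel capacity (per elapsed period).
    fn decay_bounds(capacity_sat: u64, lower_sat: u64, upper_sat: u64, half_lives: u32) -> (u64, u64) {
        let decayed_lower = lower_sat >> half_lives;
        let upper_offset = (capacity_sat - upper_sat) >> half_lives;
        (decayed_lower, capacity_sat - upper_offset)
    }
    // decay_bounds(1_000_000, 200_000, 600_000, 1) == (100_000, 800_000), matching the
    // example in the documentation above.
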
@@ -758,7 +780,7 @@ impl ProbabilisticScoringParameters {
                        base_penalty_msat: 0,
                        base_penalty_amount_multiplier_msat: 0,
                        liquidity_penalty_multiplier_msat: 0,
-                       liquidity_offset_half_life: Duration::from_secs(3600),
+                       liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
                        liquidity_penalty_amount_multiplier_msat: 0,
                        historical_liquidity_penalty_multiplier_msat: 0,
                        historical_liquidity_penalty_amount_multiplier_msat: 0,
@@ -784,7 +806,7 @@ impl Default for ProbabilisticScoringParameters {
                        base_penalty_msat: 500,
                        base_penalty_amount_multiplier_msat: 8192,
                        liquidity_penalty_multiplier_msat: 30_000,
-                       liquidity_offset_half_life: Duration::from_secs(3600),
+                       liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
                        liquidity_penalty_amount_multiplier_msat: 192,
                        historical_liquidity_penalty_multiplier_msat: 10_000,
                        historical_liquidity_penalty_amount_multiplier_msat: 64,
index 5fddb57eb36a0c1a2d119baeabd7d0a757509ee6..3254e8b0134d298517fcc9095792fb48c9935b50 100644 (file)
@@ -335,7 +335,7 @@ mod tests {
        use util::ser::{self, FixedLengthReader, LengthReadableArgs, Writeable};
 
        // Used for testing various lengths of serialization.
-       #[derive(Debug, PartialEq)]
+       #[derive(Debug, PartialEq, Eq)]
        struct TestWriteable {
                field1: Vec<u8>,
                field2: Vec<u8>,
index b2004df3e9cbd0c043c2b2ef184e538f267ea02a..f44e43b7db4426ce724a2346b3f5ecd015a8d301 100644 (file)
@@ -274,7 +274,7 @@ impl Default for ChannelHandshakeLimits {
 
 /// Options which apply on a per-channel basis and may change at runtime or based on negotiation
 /// with our counterparty.
-#[derive(Copy, Clone, Debug, PartialEq)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub struct ChannelConfig {
        /// Amount (in millionths of a satoshi) charged per satoshi for payments forwarded outbound
        /// over the channel.
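
For reference, the proportional fee documented above combines with the channel's base fee in the standard BOLT 7 way when forwarding. A minimal sketch (the `ChannelConfig` field names `forwarding_fee_base_msat` and `forwarding_fee_proportional_millionths` are assumed here; the formula is the usual forwarding-fee calculation rather than anything specific to this diff):

    // Illustrative only: fee charged to forward `amount_msat` outbound over a channel.
    fn forwarding_fee_msat(amount_msat: u64, base_msat: u32, proportional_millionths: u32) -> u64 {
        base_msat as u64 + amount_msat * proportional_millionths as u64 / 1_000_000
    }
    // e.g. forwarding 200_000_000 msat with a 1_000 msat base fee and 500 ppm costs
    // 1_000 + 200_000_000 * 500 / 1_000_000 = 101_000 msat.
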
index f4450cc129d81979cedbf1580e5cc3a0450dad62..34b5954d48525b4555ba45b3de803b334ab24007 100644 (file)
@@ -7,6 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+use ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
 use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
 use ln::{chan_utils, msgs, PaymentPreimage};
 use chain::keysinterface::{Sign, InMemorySigner, BaseSign};
@@ -199,6 +200,16 @@ impl BaseSign for EnforcingSigner {
                Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
        }
 
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &mut Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()> {
+               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
+               // As long as our minimum dust limit is enforced and is greater than our anchor output
+               // value, an anchor output can only have an index within [0, 1].
+               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
+               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
+       }
+
        fn sign_channel_announcement(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>)
        -> Result<(Signature, Signature), ()> {
                self.inner.sign_channel_announcement(msg, secp_ctx)
index ad699354233cb705fd41221cee58293a3a62c3d2..f00d2ab2d0e741a3b833b56b90ec8edae7fbce93 100644 (file)
@@ -16,7 +16,7 @@ use core::fmt;
 
 /// Indicates an error on the client's part (usually some variant of attempting to use too-low or
 /// too-high values)
-#[derive(Clone, PartialEq)]
+#[derive(Clone, PartialEq, Eq)]
 pub enum APIError {
        /// Indicates the API was wholly misused (see err for more). Cases where these can be returned
        /// are documented, but generally indicates some precondition of a function was violated.
index 8ddd762e97036bac77fee08c2be7819ec707388e..20f1c5b786cf66d4c90fbe0a28c6dca3d351f8dc 100644 (file)
@@ -15,6 +15,7 @@
 //! few other things.
 
 use chain::keysinterface::SpendableOutputDescriptor;
+use ln::chan_utils::HTLCOutputInCommitment;
 use ln::channelmanager::PaymentId;
 use ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
 use ln::features::ChannelTypeFeatures;
@@ -25,7 +26,7 @@ use routing::gossip::NetworkUpdate;
 use util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
 use routing::router::{RouteHop, RouteParameters};
 
-use bitcoin::{PackedLockTime, Transaction};
+use bitcoin::{PackedLockTime, Transaction, OutPoint};
 use bitcoin::blockdata::script::Script;
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -74,7 +75,7 @@ impl_writeable_tlv_based_enum!(PaymentPurpose,
        (2, SpontaneousPayment)
 );
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 /// The reason the channel was closed. See individual variants for more details.
 pub enum ClosureReason {
        /// Closure generated from receiving a peer error message.
@@ -153,7 +154,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
 );
 
 /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum HTLCDestination {
        /// We tried forwarding to a channel but failed to do so. An example of such an instance is when
        /// there is insufficient capacity in our outbound channel.
@@ -196,6 +197,84 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCDestination,
        }
 );
 
+/// A descriptor used to sign for a commitment transaction's anchor output.
+#[derive(Clone, Debug)]
+pub struct AnchorDescriptor {
+       /// A unique identifier used along with `channel_value_satoshis` to re-derive the
+       /// [`InMemorySigner`] required to sign `input`.
+       ///
+       /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+       pub channel_keys_id: [u8; 32],
+       /// The value in satoshis of the channel we're attempting to spend the anchor output of. This is
+       /// used along with `channel_keys_id` to re-derive the [`InMemorySigner`] required to sign
+       /// `input`.
+       ///
+       /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+       pub channel_value_satoshis: u64,
+       /// The transaction input's outpoint corresponding to the commitment transaction's anchor
+       /// output.
+       pub outpoint: OutPoint,
+}
+
+/// Represents the different types of transactions, originating from LDK, to be bumped.
+#[derive(Clone, Debug)]
+pub enum BumpTransactionEvent {
+       /// Indicates that a channel featuring anchor outputs is to be closed by broadcasting the local
+       /// commitment transaction. Since commitment transactions have a static feerate pre-agreed upon,
+       /// they may need additional fees to be attached through a child transaction using the popular
+       /// [Child-Pays-For-Parent](https://bitcoinops.org/en/topics/cpfp) fee bumping technique. This
+       /// child transaction must include the anchor input described within `anchor_descriptor` along
+       /// with additional inputs to meet the target feerate. Failure to meet the target feerate
+       /// decreases the confirmation odds of the transaction package (which includes the commitment
+       /// and child anchor transactions), possibly resulting in a loss of funds. Once the transaction
+       /// is constructed, it must be fully signed for and broadcasted by the consumer of the event
+       /// along with the `commitment_tx` enclosed. Note that the `commitment_tx` must always be
+       /// broadcast first, as the child anchor transaction depends on it.
+       ///
+       /// The consumer should be able to sign for any of the additional inputs included within the
+       /// child anchor transaction. To sign its anchor input, an [`InMemorySigner`] should be
+       /// re-derived through [`KeysManager::derive_channel_keys`] with the help of
+       /// [`AnchorDescriptor::channel_keys_id`] and [`AnchorDescriptor::channel_value_satoshis`].
+       ///
+       /// It is possible to receive more than one instance of this event if a valid child anchor
+       /// transaction is never broadcast, or is broadcast but without a sufficient fee to be mined. Care should
+       /// be taken by the consumer of the event to ensure any future iterations of the child anchor
+       /// transaction adhere to the [Replace-By-Fee
+       /// rules](https://github.com/bitcoin/bitcoin/blob/master/doc/policy/mempool-replacements.md)
+       /// for fee bumps to be accepted into the mempool, and eventually the chain. As the frequency of
+       /// these events is not user-controlled, users may ignore/drop the event if they are no longer
+       /// able to commit external confirmed funds to the child anchor transaction.
+       ///
+       /// The set of `pending_htlcs` on the commitment transaction to be broadcast can be inspected to
+       /// determine whether a significant portion of the channel's funds are allocated to HTLCs,
+       /// enabling users to make their own decisions regarding the importance of the commitment
+       /// transaction's confirmation. Note that this is not required, but simply exists as an option
+       /// for users to override LDK's behavior. On commitments with no HTLCs (indicated by those with
+       /// an empty `pending_htlcs`), confirmation of the commitment transaction can be considered to
+       /// be not urgent.
+       ///
+       /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner
+       /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys
+       ChannelClose {
+               /// The target feerate that the transaction package, which consists of the commitment
+               /// transaction and the to-be-crafted child anchor transaction, must meet.
+               package_target_feerate_sat_per_1000_weight: u32,
+               /// The channel's commitment transaction to bump the fee of. This transaction should be
+               /// broadcast along with the anchor transaction constructed as a result of consuming this
+               /// event.
+               commitment_tx: Transaction,
+               /// The absolute fee in satoshis of the commitment transaction. This can be used along
+               /// with the weight of the commitment transaction to determine its feerate.
+               commitment_tx_fee_satoshis: u64,
+               /// The descriptor to sign the anchor input of the anchor transaction constructed as a
+               /// result of consuming this event.
+               anchor_descriptor: AnchorDescriptor,
+               /// The set of pending HTLCs on the commitment transaction that need to be resolved once the
+               /// commitment transaction confirms.
+               pending_htlcs: Vec<HTLCOutputInCommitment>,
+       },
+}
+
 /// An Event which you should probably take some action in response to.
 ///
 /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
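
As a rough guide to consuming the `BumpTransactionEvent::ChannelClose` event described above (surfaced via `Event::BumpTransaction` in the next hunk), the sketch below re-derives the anchor signer as the docs describe and hands transaction construction to a hypothetical wallet. The `Wallet` trait and its methods are placeholders invented for illustration, not LDK APIs; only `KeysManager::derive_channel_keys` and `BaseSign::sign_holder_anchor_input` come from LDK, and the whole thing only applies with the `anchors` cfg enabled since the event variant is gated on it:

    use bitcoin::Transaction;
    use bitcoin::secp256k1::Secp256k1;
    use lightning::chain::keysinterface::{BaseSign, KeysManager};
    use lightning::util::events::{BumpTransactionEvent, Event};

    // Hypothetical stand-in for the consumer's own on-chain wallet.
    trait Wallet {
        /// Builds a child transaction spending the anchor outpoint plus enough confirmed wallet
        /// inputs (and a change output) to bring the commitment + child package to the target feerate.
        fn build_anchor_child_tx(&self, anchor_outpoint: bitcoin::OutPoint, commitment_tx: &Transaction,
            commitment_tx_fee_satoshis: u64, target_feerate_sat_per_1000_weight: u32) -> Transaction;
        fn sign_wallet_inputs(&self, tx: &mut Transaction);
        fn broadcast(&self, tx: &Transaction);
    }

    fn handle_event<W: Wallet>(keys_manager: &KeysManager, wallet: &W, event: Event) {
        if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose {
            package_target_feerate_sat_per_1000_weight, commitment_tx, commitment_tx_fee_satoshis,
            anchor_descriptor, ..
        }) = event {
            // Re-derive the signer for the anchor input, as described in the event docs.
            let signer = keys_manager.derive_channel_keys(
                anchor_descriptor.channel_value_satoshis, &anchor_descriptor.channel_keys_id);
            let mut child_tx = wallet.build_anchor_child_tx(anchor_descriptor.outpoint, &commitment_tx,
                commitment_tx_fee_satoshis, package_target_feerate_sat_per_1000_weight);
            // Sign the anchor input (assumed to sit at index 0 by construction of the hypothetical
            // helper). In practice the returned signature must be placed in that input's witness.
            let _anchor_sig = signer.sign_holder_anchor_input(&mut child_tx, 0, &Secp256k1::new());
            wallet.sign_wallet_inputs(&mut child_tx);
            // The commitment transaction must be broadcast before the child that spends its anchor.
            wallet.broadcast(&commitment_tx);
            wallet.broadcast(&child_tx);
        }
    }
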
@@ -602,6 +681,13 @@ pub enum Event {
                /// Destination of the HTLC that failed to be processed.
                failed_next_destination: HTLCDestination,
        },
+       #[cfg(anchors)]
+       /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event
+       /// requires confirmed external funds to be readily available to spend.
+       ///
+       /// LDK does not currently generate this event. It is limited to the scope of channels with
+       /// anchor outputs, which will be introduced in a future release.
+       BumpTransaction(BumpTransactionEvent),
 }
 
 impl Writeable for Event {
@@ -753,6 +839,15 @@ impl Writeable for Event {
                                        (2, failed_next_destination, required),
                                })
                        },
+                       #[cfg(anchors)]
+                       &Event::BumpTransaction(ref event) => {
+                               27u8.write(writer)?;
+                               match event {
+                                       // We never write the ChannelClose events as they'll be replayed upon restarting
+                                       // anyway if the commitment transaction remains unconfirmed.
+                                       BumpTransactionEvent::ChannelClose { .. } => {}
+                               }
+                       }
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
index 676c303bfa8dc1384b404cfe355aa77d99990242..79ef415c7b24f39e0034ad0d299ef8492172fd71 100644 (file)
@@ -20,7 +20,7 @@ pub const MAX_SCID_TX_INDEX: u64 = 0x00ffffff;
 pub const MAX_SCID_VOUT_INDEX: u64 = 0xffff;
 
 /// A `short_channel_id` construction error
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum ShortChannelIdError {
        BlockOverflow,
        TxIndexOverflow,
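
For orientation, the two limits above come from the BOLT 7 `short_channel_id` packing: 3 bytes of block height, 3 bytes of transaction index, and 2 bytes of output index in a single u64. A sketch loosely modeled on this module's helper (the return type and the vout error case are simplified assumptions here):

    // Illustrative only: pack block height, tx index and vout into a short_channel_id.
    fn scid_from_parts(block: u64, tx_index: u64, vout_index: u64) -> Result<u64, ()> {
        if block > 0x00ffffff { return Err(()); }      // BlockOverflow
        if tx_index > 0x00ffffff { return Err(()); }   // TxIndexOverflow
        if vout_index > 0xffff { return Err(()); }     // vout overflow
        Ok((block << 40) | (tx_index << 16) | vout_index)
    }
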
index 6ef29073837aa77d856afa37f65c737e6638abe6..3de03ea5a0d5efd93b4c3a22e4d88845656dbf23 100644 (file)
@@ -399,7 +399,7 @@ impl Readable for BigSize {
 /// In TLV we occasionally send fields which only consist of, or potentially end with, a
 /// variable-length integer which is simply truncated by skipping high zero bytes. This type
 /// encapsulates such integers implementing Readable/Writeable for them.
-#[cfg_attr(test, derive(PartialEq, Debug))]
+#[cfg_attr(test, derive(PartialEq, Eq, Debug))]
 pub(crate) struct HighZeroBytesDroppedBigSize<T>(pub T);
 
 macro_rules! impl_writeable_primitive {
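
The "high zero bytes dropped" encoding mentioned above can be illustrated in isolation (a sketch of the idea only, not the module's actual Writeable/Readable implementation):

    // Illustrative only: serialize a u64 big-endian with leading zero bytes dropped.
    fn encode_high_zero_bytes_dropped(value: u64) -> Vec<u8> {
        let bytes = value.to_be_bytes();
        let first_nonzero = bytes.iter().position(|b| *b != 0).unwrap_or(bytes.len());
        bytes[first_nonzero..].to_vec()
    }
    // e.g. 260 (0x0104) encodes as [0x01, 0x04] and 0 encodes as zero bytes; on read, the value
    // is reconstructed from however many bytes remain in the enclosing TLV record.
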
@@ -979,7 +979,7 @@ impl Readable for String {
 /// The character set consists of ASCII alphanumeric characters, hyphens, and periods.
 /// Its length is guaranteed to be representable by a single byte.
 /// This serialization is used by BOLT 7 hostnames.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Hostname(String);
 impl Hostname {
        /// Returns the length of the hostname.
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644 (file)
index 0000000..91b8023
--- /dev/null
@@ -0,0 +1 @@
+disable_all_formatting = true
\ No newline at end of file