Merge pull request #2400 from TheBlueMatt/2023-07-kill-vec_type
author    Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
          Tue, 11 Jul 2023 19:58:34 +0000 (19:58 +0000)
committer GitHub <noreply@github.com>
          Tue, 11 Jul 2023 19:58:34 +0000 (19:58 +0000)
Fix backwards compat for blocked_monitor_updates and finally kill `vec_type`

17 files changed:
.github/workflows/build.yml
fuzz/src/full_stack.rs
fuzz/src/router.rs
lightning-invoice/src/utils.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/util/config.rs
lightning/src/util/ser_macros.rs

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6795c618c2bd2e07ae02ed0362a791183d681b42..9ebfd1257216271357018313999e0d9976e85cd5 100644
@@ -37,11 +37,9 @@ jobs:
       - name: Checkout source code
         uses: actions/checkout@v3
       - name: Install Rust ${{ matrix.toolchain }} toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ matrix.toolchain }}
-          override: true
-          profile: minimal
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }}
+          rustup override set ${{ matrix.toolchain }}
       - name: Install no-std-check dependencies for ARM Embedded
         if: "matrix.platform == 'ubuntu-latest'"
         run: |
@@ -101,11 +99,9 @@ jobs:
       - name: Checkout source code
         uses: actions/checkout@v3
       - name: Install Rust ${{ env.TOOLCHAIN }} toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.TOOLCHAIN }}
-          override: true
-          profile: minimal
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
+          rustup override set ${{ env.TOOLCHAIN }}
       - name: Cache routing graph snapshot
         id: cache-graph
         uses: actions/cache@v3
@@ -158,11 +154,9 @@ jobs:
         with:
           fetch-depth: 0
       - name: Install Rust ${{ env.TOOLCHAIN }} toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.TOOLCHAIN }}
-          override: true
-          profile: minimal
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
+          rustup override set ${{ env.TOOLCHAIN }}
       - name: Fetch full tree and rebase on upstream
         run: |
           git remote add upstream https://github.com/lightningdevkit/rust-lightning
@@ -183,11 +177,9 @@ jobs:
         with:
           fetch-depth: 0
       - name: Install Rust ${{ env.TOOLCHAIN }} toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.TOOLCHAIN }}
-          override: true
-          profile: minimal
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
+          rustup override set ${{ env.TOOLCHAIN }}
       - name: Run cargo check for release build.
         run: |
           cargo check --release
@@ -207,16 +199,14 @@ jobs:
   fuzz:
     runs-on: ubuntu-latest
     env:
-      TOOLCHAIN: stable
+      TOOLCHAIN: 1.58
     steps:
       - name: Checkout source code
         uses: actions/checkout@v3
-      - name: Install Rust 1.58 toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: 1.58
-          override: true
-          profile: minimal
+      - name: Install Rust ${{ env.TOOLCHAIN }} toolchain
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
+          rustup override set ${{ env.TOOLCHAIN }}
       - name: Install dependencies for honggfuzz
         run: |
           sudo apt-get update
@@ -236,11 +226,9 @@ jobs:
       - name: Checkout source code
         uses: actions/checkout@v3
       - name: Install Rust ${{ env.TOOLCHAIN }} toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.TOOLCHAIN }}
-          override: true
-          profile: minimal
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
+          rustup override set ${{ env.TOOLCHAIN }}
       - name: Install clippy
         run: |
           rustup component add clippy
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 2b9449d72b8ed00c978bb3ad817062ccef0a1f9a..9caf91040346c45436671cb0f4c3e74ad1cbb373 100644
@@ -43,7 +43,7 @@ use lightning::ln::functional_test_utils::*;
 use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
-use lightning::util::config::UserConfig;
+use lightning::util::config::{UserConfig, MaxDustHTLCExposure};
 use lightning::util::errors::APIError;
 use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
 use lightning::util::logger::Logger;
@@ -439,6 +439,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        });
        let mut config = UserConfig::default();
        config.channel_config.forwarding_fee_proportional_millionths =  slice_to_be32(get_slice!(4));
+       config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
        config.channel_handshake_config.announced_channel = get_slice!(1)[0] != 0;
        let network = Network::Bitcoin;
        let best_block_timestamp = genesis_block(network).header.time;
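
A note on the new knob above: `MaxDustHTLCExposure::FeeRateMultiplier` scales the dust-exposure cap with the prevailing feerate. A minimal sketch, assuming the cap is simply feerate * multiplier in millisatoshis (LDK's actual computation lives elsewhere and may differ), of why the fuzzer picks `5_000_000 / 253`:

// Hedged sketch: `feerate_sat_per_kw` and the msat result are assumptions,
// not LDK's exact internals.
fn max_dust_exposure_msat(feerate_sat_per_kw: u64, multiplier: u64) -> u64 {
    feerate_sat_per_kw.saturating_mul(multiplier)
}

fn main() {
    // The fuzzer's minimum feerate is 253 sat per 1000 weight, so a
    // multiplier of 5_000_000 / 253 (= 19_762) restores roughly the old
    // fixed 5_000_000 msat limit at that feerate.
    assert_eq!(max_dust_exposure_msat(253, 5_000_000 / 253), 4_999_786);
}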
@@ -817,6 +818,8 @@ mod tests {
                //
                // 0a - create the funding transaction (client should send funding_created now)
                //
+               // 00fd00fd - Two feerate requests (calculating max dust exposure; both return min feerate and are consumed by the FuzzEstimator)
+               //
                // 030112 - inbound read from peer id 1 of len 18
                // 0062 01000000000000000000000000000000 - message header indicating message length 98
                // 030172 - inbound read from peer id 1 of len 114
@@ -845,6 +848,8 @@ mod tests {
                // 0300c1 - inbound read from peer id 0 of len 193
                // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
                //
+               // 00fd - One feerate request (calculating max dust exposure; returns min feerate and is consumed by the FuzzEstimator)
+               //
                // 030012 - inbound read from peer id 0 of len 18
                // 0064 03000000000000000000000000000000 - message header indicating message length 100
                // 030074 - inbound read from peer id 0 of len 116
@@ -859,6 +864,8 @@ mod tests {
                // 07 - process the now-pending HTLC forward
                // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
                //
+               // 00fd00fd - Two feerate requests (calculating max dust exposure; both return min feerate and are consumed by the FuzzEstimator)
+               //
                // - we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
                // 030112 - inbound read from peer id 1 of len 18
                // 0064 01000000000000000000000000000000 - message header indicating message length 100
@@ -902,6 +909,8 @@ mod tests {
                // 0300c1 - inbound read from peer id 0 of len 193
                // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
                //
+               // 00fd - One feerate request (calculating max dust exposure; returns min feerate and is consumed by the FuzzEstimator)
+               //
                // - now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
                // 030012 - inbound read from peer id 0 of len 18
                // 0063 03000000000000000000000000000000 - message header indicating message length 99
@@ -923,6 +932,8 @@ mod tests {
                // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
                // - we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
                //
+               // 00fd00fd - Two feerate requests (calculating max dust exposure; both return min feerate and are consumed by the FuzzEstimator)
+               //
                // 030112 - inbound read from peer id 1 of len 18
                // 0064 01000000000000000000000000000000 - message header indicating message length 100
                // 030174 - inbound read from peer id 1 of len 116
@@ -978,6 +989,8 @@ mod tests {
                // 0300c1 - inbound read from peer id 0 of len 193
                // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
                //
+               // 00fd - One feerate request (calculating max dust exposure; returns min feerate and is consumed by the FuzzEstimator)
+               //
                // 030012 - inbound read from peer id 0 of len 18
                // 00a4 03000000000000000000000000000000 - message header indicating message length 164
                // 0300b4 - inbound read from peer id 0 of len 180
@@ -992,6 +1005,8 @@ mod tests {
                // 07 - process the now-pending HTLC forward
                // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
                //
+               // 00fd00fd - Two feerate requests (calculating max dust exposure; both return min feerate and are consumed by the FuzzEstimator)
+               //
                // 0c007d - connect a block with one transaction of len 125
                // 02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c0000000000000160014280000000000000000000000000000000000000005000020 - the commitment transaction for channel 3f00000000000000000000000000000000000000000000000000000000000000
                //
@@ -1007,7 +1022,7 @@ mod tests {
                // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
 
                let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
-               super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012001003000000000000000000000000000000030020001000021aaa0008aaaaaaaaaaaa9aaa030000000000000000000000000000000300120147030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030059030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010000010210000300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112001001000000000000000000000000000000030120001000021aaa0008aaaaaaaaaaaa9aaa01000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120112010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f000050300000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000002000300000000000000000000000000000000000000000000000000000000000003000300000000000000000000000000000000000000000000000000000000000004000300000000000000000000000000000000000000000000000000000000000005000266000000000000000000000000000003012300000000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a00000000000000000000000000000000000000000000000000000000000000026700000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000300
1205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b000000000000000000000000000000000000000000000000000000000000000300000000000000000
00000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0
0000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000
000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
+               super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012001003000000000000000000000000000000030020001000021aaa0008aaaaaaaaaaaa9aaa030000000000000000000000000000000300120147030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030059030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010000010210000300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112001001000000000000000000000000000000030120001000021aaa0008aaaaaaaaaaaa9aaa01000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120112010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f000050300000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000002000300000000000000000000000000000000000000000000000000000000000003000300000000000000000000000000000000000000000000000000000000000004000300000000000000000000000000000000000000000000000000000000000005000266000000000000000000000000000003012300000000000000000000000000000000000000010000000000000000000000000000000a00fd00fd03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000
000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b000000000000000000000000000000000000000000000000000000000000000300000
00000000000000000000000000700fd00fd03011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000700fd00fd03011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac0300000000000
00000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d0000000000000000000000000000000000
0000000000000000000000000000020f00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000700fd00fd0c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
 
                let log_entries = logger.lines.lock().unwrap();
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index 72935f153eab861f00509c310f88d5d822ea10fd..31732257c3f1a1c099161c8b982f1f5f35d701b2 100644
@@ -270,6 +270,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                                inbound_htlc_maximum_msat: None,
                                                                config: None,
                                                                feerate_sat_per_1000_weight: None,
+                                                               channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
                                                        });
                                                }
                                                Some(&first_hops_vec[..])
diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs
index c052706975968105c95b7dbf08c4ec7132b6c6c5..199aad064694c975d6e97cc132e3b460a84c1b71 100644
@@ -792,12 +792,13 @@ fn prefer_current_channel(min_inbound_capacity_msat: Option<u64>, current_channe
 
 #[cfg(test)]
 mod test {
+       use core::cell::RefCell;
        use core::time::Duration;
        use crate::{Currency, Description, InvoiceDescription, SignOrCreationError, CreationError};
        use bitcoin_hashes::{Hash, sha256};
        use bitcoin_hashes::sha256::Hash as Sha256;
        use lightning::sign::PhantomKeysManager;
-       use lightning::events::{MessageSendEvent, MessageSendEventsProvider, Event};
+       use lightning::events::{MessageSendEvent, MessageSendEventsProvider, Event, EventsProvider};
        use lightning::ln::{PaymentPreimage, PaymentHash};
        use lightning::ln::channelmanager::{PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields, Retry};
        use lightning::ln::functional_test_utils::*;
@@ -1357,13 +1358,20 @@ mod test {
                // Note that we have to "forward pending HTLCs" twice before we see the PaymentClaimable as
                // this "emulates" the payment taking two hops, providing some privacy to make phantom node
                // payments "look real" by taking more time.
-               expect_pending_htlcs_forwardable_ignore!(nodes[fwd_idx]);
-               nodes[fwd_idx].node.process_pending_htlc_forwards();
-               expect_pending_htlcs_forwardable_ignore!(nodes[fwd_idx]);
-               nodes[fwd_idx].node.process_pending_htlc_forwards();
+               let other_events = RefCell::new(Vec::new());
+               let forward_event_handler = |event: Event| {
+                       if let Event::PendingHTLCsForwardable { .. } = event {
+                               nodes[fwd_idx].node.process_pending_htlc_forwards();
+                       } else {
+                               other_events.borrow_mut().push(event);
+                       }
+               };
+               nodes[fwd_idx].node.process_pending_events(&forward_event_handler);
+               nodes[fwd_idx].node.process_pending_events(&forward_event_handler);
 
                let payment_preimage_opt = if user_generated_pmt_hash { None } else { Some(payment_preimage) };
-               expect_payment_claimable!(&nodes[fwd_idx], payment_hash, payment_secret, payment_amt, payment_preimage_opt, invoice.recover_payee_pub_key());
+               assert_eq!(other_events.borrow().len(), 1);
+               check_payment_claimable(&other_events.borrow()[0], payment_hash, payment_secret, payment_amt, payment_preimage_opt, invoice.recover_payee_pub_key());
                do_claim_payment_along_route(&nodes[0], &[&vec!(&nodes[fwd_idx])[..]], false, payment_preimage);
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 2);
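
The rewrite above replaces the `expect_pending_htlcs_forwardable_ignore!` macro calls with a closure passed to `process_pending_events`, handling `PendingHTLCsForwardable` inline and stashing any other events for later assertions. A self-contained sketch of that capture pattern, using a hypothetical stand-in `Event` enum rather than LDK's:

use core::cell::RefCell;

// Stand-in event type; LDK's `Event` has many more variants.
enum Event { PendingHTLCsForwardable, Other(u32) }

fn main() {
    let other_events = RefCell::new(Vec::new());
    let handler = |event: Event| match event {
        // React to the interesting variant immediately...
        Event::PendingHTLCsForwardable => { /* drive HTLC forwards here */ }
        // ...and queue everything else for inspection afterwards.
        ev => other_events.borrow_mut().push(ev),
    };
    handler(Event::PendingHTLCsForwardable);
    handler(Event::Other(7));
    assert_eq!(other_events.borrow().len(), 1);
}

The `RefCell` is what lets an `Fn` closure mutate the shared vector without a `&mut` capture.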
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 562c76fa3e2c996f136c54d8fb4adb36baeba802..2cc71a2ecc7ce7a77e99abee6fc0a135a058c7bc 100644
@@ -520,12 +520,13 @@ where C::Target: chain::Filter,
        pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
                &self, handler: H
        ) {
-               let mut pending_events = Vec::new();
-               for monitor_state in self.monitors.read().unwrap().values() {
-                       pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-               }
-               for event in pending_events {
-                       handler(event).await;
+               // Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
+               // crazy dance to process a monitor's events then only remove them once we've done so.
+               let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
+               for funding_txo in mons_to_process {
+                       let mut ev;
+                       super::channelmonitor::process_events_body!(
+                               self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
                }
        }
 
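
The comment above states the constraint: a read guard on `monitors` must not live across an `.await`. A minimal sketch of the same snapshot-then-relock pattern with hypothetical types (unlike the real macro-based code, this clones events out rather than removing them only after they are handled):

use std::collections::HashMap;
use std::future::Future;
use std::sync::RwLock;

// Snapshot the keys up front, then re-take the read lock per entry so no
// guard is ever held across an await point.
async fn process_all<F, Fut>(map: &RwLock<HashMap<u64, Vec<u32>>>, handler: F)
where
    F: Fn(u32) -> Fut,
    Fut: Future<Output = ()>,
{
    let keys: Vec<u64> = map.read().unwrap().keys().cloned().collect();
    for key in keys {
        // The guard taken here is dropped at the end of this statement,
        // before any await.
        let events = map.read().unwrap().get(&key).cloned().unwrap_or_default();
        for ev in events {
            handler(ev).await;
        }
    }
}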
@@ -796,12 +797,8 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L
        /// [`SpendableOutputs`]: events::Event::SpendableOutputs
        /// [`BumpTransaction`]: events::Event::BumpTransaction
        fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
-               let mut pending_events = Vec::new();
                for monitor_state in self.monitors.read().unwrap().values() {
-                       pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-               }
-               for event in pending_events {
-                       handler.handle_event(event);
+                       monitor_state.monitor.process_pending_events(&handler);
                }
        }
 }
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 71fc1cb6f06845e9ad49a611fce7be552a11fd00..78b5c65e1407170df338b71ca678e79b7738fda0 100644
@@ -49,7 +49,7 @@ use crate::chain::Filter;
 use crate::util::logger::Logger;
 use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
 use crate::util::byte_utils;
-use crate::events::Event;
+use crate::events::{Event, EventHandler};
 use crate::events::bump_transaction::{AnchorDescriptor, HTLCDescriptor, BumpTransactionEvent};
 
 use crate::prelude::*;
@@ -738,11 +738,6 @@ impl Readable for IrrevocablyResolvedHTLC {
 /// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
 /// information and are actively monitoring the chain.
 ///
-/// Pending Events or updated HTLCs which have not yet been read out by
-/// get_and_clear_pending_monitor_events or get_and_clear_pending_events are serialized to disk and
-/// reloaded at deserialize-time. Thus, you must ensure that, when handling events, all events
-/// gotten are fully handled before re-serializing the new state.
-///
 /// Note that the deserializer is only implemented for (BlockHash, ChannelMonitor), which
 /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
 /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
@@ -752,7 +747,7 @@ pub struct ChannelMonitor<Signer: WriteableEcdsaChannelSigner> {
        #[cfg(test)]
        pub(crate) inner: Mutex<ChannelMonitorImpl<Signer>>,
        #[cfg(not(test))]
-       inner: Mutex<ChannelMonitorImpl<Signer>>,
+       pub(super) inner: Mutex<ChannelMonitorImpl<Signer>>,
 }
 
 #[derive(PartialEq)]
@@ -829,7 +824,8 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
        // we further MUST NOT generate events during block/transaction-disconnection.
        pending_monitor_events: Vec<MonitorEvent>,
 
-       pending_events: Vec<Event>,
+       pub(super) pending_events: Vec<Event>,
+       pub(super) is_processing_pending_events: bool,
 
        // Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on
        // which to take actions once they reach enough confirmations. Each entry includes the
@@ -1088,6 +1084,42 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
        }
 }
 
+macro_rules! _process_events_body {
+       ($self_opt: expr, $event_to_handle: expr, $handle_event: expr) => {
+               loop {
+                       let (pending_events, repeated_events);
+                       if let Some(us) = $self_opt {
+                               let mut inner = us.inner.lock().unwrap();
+                               if inner.is_processing_pending_events {
+                                       break;
+                               }
+                               inner.is_processing_pending_events = true;
+
+                               pending_events = inner.pending_events.clone();
+                               repeated_events = inner.get_repeated_events();
+                       } else { break; }
+                       let num_events = pending_events.len();
+
+                       for event in pending_events.into_iter().chain(repeated_events.into_iter()) {
+                               $event_to_handle = event;
+                               $handle_event;
+                       }
+
+                       if let Some(us) = $self_opt {
+                               let mut inner = us.inner.lock().unwrap();
+                               inner.pending_events.drain(..num_events);
+                               inner.is_processing_pending_events = false;
+                               if !inner.pending_events.is_empty() {
+                                       // If there are more events to process, go ahead and do so.
+                                       continue;
+                               }
+                       }
+                       break;
+               }
+       }
+}
+pub(super) use _process_events_body as process_events_body;
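
Stripped of the macro plumbing, `process_events_body` is a flag-guarded loop: take the lock, bail if another call is mid-processing, snapshot the events, drop the lock while the handler runs (or awaits), then drain only what was actually handled. A standalone sketch of the same pattern, with stand-in types rather than LDK's actual structures:

    use std::sync::Mutex;

    #[derive(Clone)]
    struct Ev(u32); // stand-in for the real event type

    struct State {
        pending: Vec<Ev>,
        repeated: Vec<Ev>,
        is_processing: bool,
    }

    fn process_events<F: FnMut(Ev)>(inner: &Mutex<State>, mut handle: F) {
        loop {
            let (pending, repeated);
            {
                let mut st = inner.lock().unwrap();
                if st.is_processing { return; } // a concurrent call is already draining
                st.is_processing = true;
                pending = st.pending.clone();
                repeated = std::mem::take(&mut st.repeated);
            } // lock dropped: the handler may block, await, or re-enter without deadlock
            let num = pending.len();
            for ev in pending.into_iter().chain(repeated) { handle(ev); }
            let mut st = inner.lock().unwrap();
            st.pending.drain(..num); // drop only the events we actually handled
            st.is_processing = false;
            if st.pending.is_empty() { return; }
            // More events arrived while we were unlocked; loop and process them too.
        }
    }

The length-based drain is what lets events queued while the handler ran survive for the next pass instead of being lost or double-processed.
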
+
 impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// For lockorder enforcement purposes, we need to have a single site which constructs the
        /// `inner` mutex, otherwise cases where we lock two monitors at the same time (eg in our
@@ -1179,6 +1211,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                        payment_preimages: HashMap::new(),
                        pending_monitor_events: Vec::new(),
                        pending_events: Vec::new(),
+                       is_processing_pending_events: false,
 
                        onchain_events_awaiting_threshold_conf: Vec::new(),
                        outputs_to_watch,
@@ -1306,16 +1339,41 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                self.inner.lock().unwrap().get_and_clear_pending_monitor_events()
        }
 
-       /// Gets the list of pending events which were generated by previous actions, clearing the list
-       /// in the process.
+       /// Processes [`SpendableOutputs`] events produced by this [`ChannelMonitor`] upon maturity.
+       ///
+       /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
+       /// events produced while there is a balance to claim on-chain within the channel. As the
+       /// confirmation of a commitment transaction may be critical to the safety of funds, we
+       /// recommend invoking this every 30 seconds, or more often if running in an environment
+       /// with spotty connections, like on mobile.
        ///
-       /// This is called by the [`EventsProvider::process_pending_events`] implementation for
-       /// [`ChainMonitor`].
+       /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
+       /// order to handle these events.
+       ///
+       /// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs
+       /// [`BumpTransaction`]: crate::events::Event::BumpTransaction
+       pub fn process_pending_events<H: Deref>(&self, handler: &H) where H::Target: EventHandler {
+               let mut ev;
+               process_events_body!(Some(self), ev, handler.handle_event(ev));
+       }
+
+       /// Processes any events asynchronously.
        ///
-       /// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
-       /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+       /// See [`Self::process_pending_events`] for more information.
+       pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
+               &self, handler: &H
+       ) {
+               let mut ev;
+               process_events_body!(Some(self), ev, { handler(ev).await });
+       }
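
A short sketch of invoking the new async variant; the wrapper function is an assumption (type paths approximate, generics elided to the signer bound) and the handler body is illustrative:

    async fn handle_monitor_events<S: lightning::sign::WriteableEcdsaChannelSigner>(
        monitor: &lightning::chain::channelmonitor::ChannelMonitor<S>,
    ) {
        // The handler bound is `Fn(Event) -> Future`, so a closure returning an
        // `async move` block works; per the macro above, handled events are only
        // drained after each await completes.
        monitor.process_pending_events_async(&|event| async move {
            match event {
                lightning::events::Event::SpendableOutputs { .. } => { /* persist, then sweep */ },
                _ => {},
            }
        }).await;
    }
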
+
+       #[cfg(test)]
        pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
-               self.inner.lock().unwrap().get_and_clear_pending_events()
+               let mut ret = Vec::new();
+               let mut lck = self.inner.lock().unwrap();
+               mem::swap(&mut ret, &mut lck.pending_events);
+               ret.append(&mut lck.get_repeated_events());
+               ret
        }
 
        pub(crate) fn get_min_seen_secret(&self) -> u64 {
@@ -2531,10 +2589,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                ret
        }
 
-       pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
-               let mut ret = Vec::new();
-               mem::swap(&mut ret, &mut self.pending_events);
-               for (claim_id, claim_event) in self.onchain_tx_handler.get_and_clear_pending_claim_events().drain(..) {
+       /// Gets the set of events that are repeated regularly (e.g. those which bump transactions
+       /// via RBF). Losing these on restart is fine, as they'll be regenerated for us at some
+       /// regular interval via [`ChannelMonitor::rebroadcast_pending_claims`].
+       pub(super) fn get_repeated_events(&mut self) -> Vec<Event> {
+               let pending_claim_events = self.onchain_tx_handler.get_and_clear_pending_claim_events();
+               let mut ret = Vec::with_capacity(pending_claim_events.len());
+               for (claim_id, claim_event) in pending_claim_events {
                        match claim_event {
                                ClaimEvent::BumpCommitment {
                                        package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_output_idx,
@@ -4096,6 +4157,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
                        payment_preimages,
                        pending_monitor_events: pending_monitor_events.unwrap(),
                        pending_events,
+                       is_processing_pending_events: false,
 
                        onchain_events_awaiting_threshold_conf,
                        outputs_to_watch,
index 95409fa1e5c011147ef82cd5e69e6b6579dbd940..8232c5e1b1611e22b2a91f467f5a11e35a526215 100644 (file)
@@ -27,7 +27,7 @@ use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -41,7 +41,7 @@ use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use crate::util::logger::Logger;
 use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
 use crate::util::scid_utils::scid_from_parts;
 
 use crate::io;
@@ -527,6 +527,10 @@ pub(super) struct ReestablishResponses {
 }
 
 /// The return type of `force_shutdown`
+///
+/// Contains an optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple,
+/// followed by the list of HTLCs to fail back, each in the form of a (source, payment hash,
+/// counterparty_node_id, channel_id) tuple.
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
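
A minimal sketch of consuming that shape, illustrative only since `ShutdownResult` is crate-internal and the handler bodies are placeholders:

    fn handle_shutdown_result(res: ShutdownResult) {
        let (monitor_update, failed_htlcs) = res;
        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
            // Apply the final ChannelMonitorUpdate for the closing channel.
            let _ = (counterparty_node_id, funding_txo, update);
        }
        for (source, payment_hash, counterparty_node_id, channel_id) in failed_htlcs {
            // Fail each HTLC back toward `source` for this channel.
            let _ = (source, payment_hash, counterparty_node_id, channel_id);
        }
    }
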
@@ -903,6 +907,34 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
        }
 
+       /// Returns the current stage of the channel's shutdown process.
+       pub fn shutdown_state(&self) -> ChannelShutdownState {
+               if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
+                       return ChannelShutdownState::ShutdownComplete;
+               }
+               if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
+                       return ChannelShutdownState::ShutdownInitiated;
+               }
+               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
+                       return ChannelShutdownState::ResolvingHTLCs;
+               }
+               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
+                       return ChannelShutdownState::NegotiatingClosingFee;
+               }
+               return ChannelShutdownState::NotShuttingDown;
+       }
+
+       fn closing_negotiation_ready(&self) -> bool {
+               self.pending_inbound_htlcs.is_empty() &&
+               self.pending_outbound_htlcs.is_empty() &&
+               self.pending_update_fee.is_none() &&
+               self.channel_state &
+               (BOTH_SIDES_SHUTDOWN_MASK |
+                       ChannelState::AwaitingRemoteRevoke as u32 |
+                       ChannelState::PeerDisconnected as u32 |
+                       ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+       }
+
        /// Returns true if this channel is currently available for use. This is a superset of
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
@@ -1059,8 +1091,18 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
        }
 
-       pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
-               self.config.options.max_dust_htlc_exposure_msat
+       pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+               fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+       where F::Target: FeeEstimator
+       {
+               match self.config.options.max_dust_htlc_exposure {
+                       MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
+                               let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
+                                       ConfirmationTarget::HighPriority);
+                               feerate_per_kw as u64 * multiplier
+                       },
+                       MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+               }
        }
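
As a worked example, `FeeRateMultiplier(5_000)` with a high-priority estimate of 2,500 sat/kW yields a cap of 2,500 * 5,000 = 12,500,000 msat, and the cap scales automatically as feerates move. A hedged sketch of setting the new knob via `UserConfig` (the `channel_config` field path is assumed from `UserConfig`'s layout):

    use lightning::util::config::{MaxDustHTLCExposure, UserConfig};

    fn dust_exposure_config() -> UserConfig {
        let mut config = UserConfig::default();
        // Scale the dust-exposure cap with the prevailing high-priority feerate:
        config.channel_config.max_dust_htlc_exposure =
            MaxDustHTLCExposure::FeeRateMultiplier(5_000);
        // Or pin it to a fixed msat value, as before this change:
        // config.channel_config.max_dust_htlc_exposure =
        //     MaxDustHTLCExposure::FixedLimitMsat(5_000_000);
        config
    }
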
 
        /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
@@ -1533,7 +1575,10 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        /// Doesn't bother handling the
        /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
        /// corner case properly.
-       pub fn get_available_balances(&self) -> AvailableBalances {
+       pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+       -> AvailableBalances
+       where F::Target: FeeEstimator
+       {
                let context = &self;
                // Note that we have to handle overflow due to the above case.
                let inbound_stats = context.get_inbound_pending_htlc_stats(None);
@@ -1615,6 +1660,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                // send above the dust limit (as the router can always overpay to meet the dust limit).
                let mut remaining_msat_below_dust_exposure_limit = None;
                let mut dust_exposure_dust_limit_msat = 0;
+               let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
 
                let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
@@ -1624,17 +1670,17 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                         context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
                let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
                        remaining_msat_below_dust_exposure_limit =
-                               Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+                               Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
                }
 
                let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
                        remaining_msat_below_dust_exposure_limit = Some(cmp::min(
                                remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-                               context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+                               max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
                }
 
@@ -2552,8 +2598,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
        }
 
-       pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
-       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+       pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+               &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+               create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+       ) -> Result<(), ChannelError>
+       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+               FE::Target: FeeEstimator, L::Target: Logger,
+       {
                // We can't accept HTLCs sent after we've sent a shutdown.
                let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
                if local_sent_shutdown {
@@ -2606,6 +2657,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                }
 
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
                let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
@@ -2616,9 +2668,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
                        let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+                       if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+                                       on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
@@ -2626,9 +2678,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
                        let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+                       if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                                       on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+                                       on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
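
Both checks above share one shape: HTLCs below the relevant dust threshold accumulate toward a cap that is now feerate-derived. Distilled into a standalone helper (illustrative, not LDK's API):

    /// Returns true when accepting `amount_msat` would breach the dust-exposure cap
    /// on a commitment transaction. `exposure_dust_limit_sats` is the relevant
    /// success/timeout dust addition plus the holder/counterparty dust limit.
    fn would_exceed_dust_cap(
        amount_msat: u64,
        exposure_dust_limit_sats: u64,
        current_dust_exposure_msat: u64,
        max_dust_htlc_exposure_msat: u64,
    ) -> bool {
        amount_msat / 1000 < exposure_dust_limit_sats
            && current_dust_exposure_msat + amount_msat > max_dust_htlc_exposure_msat
    }
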
@@ -2995,16 +3047,24 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// Public version of the below, checking relevant preconditions first.
        /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
        /// returns `(None, Vec::new())`.
-       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+       pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
                   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-                       self.free_holding_cell_htlcs(logger)
+                       self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
 
        /// Frees any pending commitment updates in the holding cell, generating the relevant messages
        /// for our counterparty.
-       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+       fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
@@ -3033,7 +3093,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                                skimmed_fee_msat, ..
                                        } => {
                                                match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
-                                                       onion_routing_packet.clone(), false, skimmed_fee_msat, logger)
+                                                       onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
                                                {
                                                        Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
                                                        Err(e) => {
@@ -3093,7 +3153,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                return (None, htlcs_to_fail);
                        }
                        let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
-                               self.send_update_fee(feerate, false, logger)
+                               self.send_update_fee(feerate, false, fee_estimator, logger)
                        } else {
                                None
                        };
@@ -3120,8 +3180,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
-               where L::Target: Logger,
+       pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger,
        {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
                        return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
@@ -3321,7 +3383,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
                }
 
-               match self.free_holding_cell_htlcs(logger) {
+               match self.free_holding_cell_htlcs(fee_estimator, logger) {
                        (Some(mut additional_update), htlcs_to_fail) => {
                                // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                                // strictly increasing by one, so decrement it here.
@@ -3356,8 +3418,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// Queues up an outbound update fee by placing it in the holding cell. You should call
        /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
        /// commitment update.
-       pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
-               let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+       pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
                assert!(msg_opt.is_none(), "We forced holding cell?");
        }
 
@@ -3368,7 +3433,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        ///
        /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
        /// [`Channel`] if `force_holding_cell` is false.
-       fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+       fn send_update_fee<F: Deref, L: Deref>(
+               &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Option<msgs::UpdateFee>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                if !self.context.is_outbound() {
                        panic!("Cannot send fee from inbound channel");
                }
@@ -3395,11 +3465,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
                let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
-               if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+               if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
@@ -3630,11 +3701,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
                        let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                        let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-                       if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+                       let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+                       if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
                                        msg.feerate_per_kw, holder_tx_dust_exposure)));
                        }
-                       if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+                       if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
                                        msg.feerate_per_kw, counterparty_tx_dust_exposure)));
                        }
@@ -3956,12 +4028,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// this point if we're the funder we should send the initial closing_signed, and in any case
        /// shutdown should complete within a reasonable timeframe.
        fn closing_negotiation_ready(&self) -> bool {
-               self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
-                       self.context.channel_state &
-                               (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
-                                ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
-                               == BOTH_SIDES_SHUTDOWN_MASK &&
-                       self.context.pending_update_fee.is_none()
+               self.context.closing_negotiation_ready()
        }
 
        /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
@@ -4988,13 +5055,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// commitment update.
        ///
        /// `Err`s will only be [`ChannelError::Ignore`].
-       pub fn queue_add_htlc<L: Deref>(
+       pub fn queue_add_htlc<F: Deref, L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-               onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>, logger: &L
-       ) -> Result<(), ChannelError> where L::Target: Logger {
+               onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<(), ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                self
                        .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
-                               skimmed_fee_msat, logger)
+                               skimmed_fee_msat, fee_estimator, logger)
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
                        .map_err(|err| {
                                if let ChannelError::Ignore(_) = err { /* fine */ }
@@ -5019,11 +5089,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// on this [`Channel`] if `force_holding_cell` is false.
        ///
        /// `Err`s will only be [`ChannelError::Ignore`].
-       fn send_htlc<L: Deref>(
+       fn send_htlc<F: Deref, L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
-               skimmed_fee_msat: Option<u64>, logger: &L
-       ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+               skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
                        return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
                }
@@ -5036,7 +5108,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
                }
 
-               let available_balances = self.context.get_available_balances();
+               let available_balances = self.context.get_available_balances(fee_estimator);
                if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
                        return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
                                available_balances.next_outbound_htlc_minimum_msat)));
@@ -5236,12 +5308,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        ///
        /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
        /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
-       pub fn send_htlc_and_commit<L: Deref>(
-               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-               onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>, logger: &L
-       ) -> Result<Option<ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+       pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+               source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
-                       onion_routing_packet, false, skimmed_fee_msat, logger);
+                       onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
                if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
                match send_res? {
                        Some(_) => {
index b4b1d64acb1b0a26b99e40d382230ef087d74b98..7df125b9323b9c3987cbfa91cf7dbaf1114d3a7d 100644 (file)
@@ -507,19 +507,19 @@ struct ClaimablePayments {
 /// running normally, and specifically must be processed before any other non-background
 /// [`ChannelMonitorUpdate`]s are applied.
 enum BackgroundEvent {
-       /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
-       /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the maybe-non-closing variant needs a public
-       /// key to handle channel resumption, whereas if the channel has been force-closed we do not
-       /// need the counterparty node_id.
+       /// Handle a ChannelMonitorUpdate which closes the channel or is for an already-closed channel.
+       /// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as the
+       /// maybe-non-closing variant needs a public key to handle channel resumption, whereas if the
+       /// channel has been force-closed we do not need the counterparty node_id.
        ///
        /// Note that any such events are lost on shutdown, so in general they must be updates which
        /// are regenerated on startup.
-       ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
        /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
        /// channel to continue normal operation.
        ///
        /// In general this should be used rather than
-       /// [`Self::ClosingMonitorUpdateRegeneratedOnStartup`], however in cases where the
+       /// [`Self::ClosedMonitorUpdateRegeneratedOnStartup`], however in cases where the
        /// `counterparty_node_id` is not available as the channel has closed from a [`ChannelMonitor`]
        /// error the other variant is acceptable.
        ///
@@ -1114,7 +1114,6 @@ where
        /// The Notifier the lock contains sends out a notification when the lock is released.
        total_consistency_lock: RwLock<()>,
 
-       #[cfg(debug_assertions)]
        background_events_processed_since_startup: AtomicBool,
 
        persistence_notifier: Notifier,
@@ -1480,6 +1479,9 @@ pub struct ChannelDetails {
        ///
        /// [`confirmations_required`]: ChannelDetails::confirmations_required
        pub is_channel_ready: bool,
+       /// The stage of the channel's shutdown.
+       /// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116.
+       pub channel_shutdown_state: Option<ChannelShutdownState>,
        /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
        /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
@@ -1519,10 +1521,13 @@ impl ChannelDetails {
                self.short_channel_id.or(self.outbound_scid_alias)
        }
 
-       fn from_channel_context<Signer: WriteableEcdsaChannelSigner>(context: &ChannelContext<Signer>,
-               best_block_height: u32, latest_features: InitFeatures) -> Self {
-
-               let balance = context.get_available_balances();
+       fn from_channel_context<Signer: WriteableEcdsaChannelSigner, F: Deref>(
+               context: &ChannelContext<Signer>, best_block_height: u32, latest_features: InitFeatures,
+               fee_estimator: &LowerBoundedFeeEstimator<F>
+       ) -> Self
+       where F::Target: FeeEstimator
+       {
+               let balance = context.get_available_balances(fee_estimator);
                let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
                        context.get_holder_counterparty_selected_channel_reserve_satoshis();
                ChannelDetails {
@@ -1567,10 +1572,33 @@ impl ChannelDetails {
                        inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
                        inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
                        config: Some(context.config()),
+                       channel_shutdown_state: Some(context.shutdown_state()),
                }
        }
 }
 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+/// Further information on the details of the channel shutdown.
+/// Upon a channel being force-closed (i.e. commitment transaction confirmation detected
+/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
+/// the channel will be removed shortly.
+/// Also note that in normal operation, peers may disconnect at any of these states,
+/// requiring re-connection before the channel can progress to later states.
+pub enum ChannelShutdownState {
+       /// Channel has not sent or received a shutdown message.
+       NotShuttingDown,
+       /// Local node has sent a shutdown message for this channel.
+       ShutdownInitiated,
+       /// Shutdown message exchanges have concluded and the channel is in the midst of
+       /// resolving all existing open HTLCs before closing can continue.
+       ResolvingHTLCs,
+       /// All HTLCs have been resolved, and the nodes are now negotiating the on-chain fee rates
+       /// for the closing transaction.
+       NegotiatingClosingFee,
+       /// We've successfully completed the closing_signed negotiation. At this point the
+       /// `ChannelManager` is about to drop the channel.
+       ShutdownComplete,
+}
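
A hedged sketch of reading the new state off `ChannelDetails` (assumes the caller fetched the list via `ChannelManager::list_channels`):

    use lightning::ln::channelmanager::{ChannelDetails, ChannelShutdownState};

    fn log_shutdown_progress(channels: &[ChannelDetails]) {
        for details in channels {
            match details.channel_shutdown_state {
                // `None` means the details were serialized by LDK prior to 0.0.116.
                Some(ChannelShutdownState::NotShuttingDown) | None => {},
                Some(state) => println!(
                    "channel {:?} is shutting down: {:?}", details.channel_id, state),
            }
        }
    }
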
+
 /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
 #[derive(Debug, PartialEq)]
@@ -1888,9 +1916,7 @@ macro_rules! handle_new_monitor_update {
                // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
                // any case so that it won't deadlock.
                debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
-               #[cfg(debug_assertions)] {
-                       debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
-               }
+               debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
                match $update_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
@@ -1994,6 +2020,8 @@ macro_rules! process_events_body {
                                let mut pending_events = $self.pending_events.lock().unwrap();
                                pending_events.drain(..num_events);
                                processed_all_events = pending_events.is_empty();
+                               // Note that `push_pending_forwards_ev` relies on `pending_events_processor` being
+                               // updated here while the `pending_events` lock is held.
                                $self.pending_events_processor.store(false, Ordering::Release);
                        }
 
@@ -2082,7 +2110,6 @@ where
                        pending_events_processor: AtomicBool::new(false),
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
-                       #[cfg(debug_assertions)]
                        background_events_processed_since_startup: AtomicBool::new(false),
                        persistence_notifier: Notifier::new(),
 
@@ -2214,7 +2241,7 @@ where
                                let peer_state = &mut *peer_state_lock;
                                for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                        }
@@ -2240,17 +2267,17 @@ where
                                let peer_state = &mut *peer_state_lock;
                                for (_channel_id, channel) in peer_state.channel_by_id.iter() {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                                for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                                for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
                                        let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone());
+                                               peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
                        }
@@ -2283,7 +2310,8 @@ where
                        return peer_state.channel_by_id
                                .iter()
                                .map(|(_, channel)|
-                                       ChannelDetails::from_channel_context(&channel.context, best_block_height, features.clone()))
+                                       ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                       features.clone(), &self.fee_estimator))
                                .collect();
                }
                vec![]
@@ -3087,7 +3115,7 @@ where
                                                session_priv: session_priv.clone(),
                                                first_hop_htlc_msat: htlc_msat,
                                                payment_id,
-                                       }, onion_packet, None, &self.logger);
+                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
                                match break_chan_entry!(self, send_res, chan) {
                                        Some(monitor_update) => {
                                                match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
@@ -3805,7 +3833,8 @@ where
                                                                                });
                                                                                if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
                                                                                        payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                                       onion_packet, skimmed_fee_msat, &self.logger)
+                                                                                       onion_packet, skimmed_fee_msat, &self.fee_estimator,
+                                                                                       &self.logger)
                                                                                {
                                                                                        if let ChannelError::Ignore(msg) = e {
                                                                                                log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
@@ -4120,7 +4149,6 @@ where
        fn process_background_events(&self) -> NotifyOption {
                debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
 
-               #[cfg(debug_assertions)]
                self.background_events_processed_since_startup.store(true, Ordering::Release);
 
                let mut background_events = Vec::new();
@@ -4131,7 +4159,7 @@ where
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
                                        // The channel has already been closed, so no use bothering to care about the
                                        // monitor updating completing.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -4194,7 +4222,7 @@ where
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
-               chan.queue_update_fee(new_feerate, &self.logger);
+               chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
                NotifyOption::DoPersist
        }
 
@@ -4711,6 +4739,11 @@ where
        -> Result<(), (PublicKey, MsgHandleErrInternal)> {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
+               // If we haven't yet run background events assume we're still deserializing and shouldn't
+               // actually pass `ChannelMonitorUpdate`s to users yet. Instead, queue them up as
+               // `BackgroundEvent`s.
+               let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
+
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let chan_id = prev_hop.outpoint.to_channel_id();
@@ -4737,14 +4770,26 @@ where
                                                                log_bytes!(chan_id), action);
                                                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                }
-                                               let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-                                                       peer_state, per_peer_state, chan);
-                                               if let Err(e) = res {
-                                                       // TODO: This is a *critical* error - we probably updated the outbound edge
-                                                       // of the HTLC's monitor with a preimage. We should retry this monitor
-                                                       // update over and over again until morale improves.
-                                                       log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-                                                       return Err((counterparty_node_id, e));
+                                               if !during_init {
+                                                       let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+                                                               peer_state, per_peer_state, chan);
+                                                       if let Err(e) = res {
+                                                               // TODO: This is a *critical* error - we probably updated the outbound edge
+                                                               // of the HTLC's monitor with a preimage. We should retry this monitor
+                                                               // update over and over again until morale improves.
+                                                               log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+                                                               return Err((counterparty_node_id, e));
+                                                       }
+                                               } else {
+                                                       // If we're running during init we cannot update a monitor directly -
+                                                       // they probably haven't actually been loaded yet. Instead, push the
+                                                       // monitor update as a background event.
+                                                       self.pending_background_events.lock().unwrap().push(
+                                                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                                       counterparty_node_id,
+                                                                       funding_txo: prev_hop.outpoint,
+                                                                       update: monitor_update.clone(),
+                                                               });
                                                }
                                        }
                                        return Ok(());
@@ -4757,16 +4802,34 @@ where
                                payment_preimage,
                        }],
                };
-               // We update the ChannelMonitor on the backward link, after
-               // receiving an `update_fulfill_htlc` from the forward link.
-               let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-               if update_res != ChannelMonitorUpdateStatus::Completed {
-                       // TODO: This needs to be handled somehow - if we receive a monitor update
-                       // with a preimage we *must* somehow manage to propagate it to the upstream
-                       // channel, or we must have an ability to receive the same event and try
-                       // again on restart.
-                       log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                               payment_preimage, update_res);
+
+               if !during_init {
+                       // We update the ChannelMonitor on the backward link, after
+                       // receiving an `update_fulfill_htlc` from the forward link.
+                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                               // TODO: This needs to be handled somehow - if we receive a monitor update
+                               // with a preimage we *must* somehow manage to propagate it to the upstream
+                               // channel, or we must have an ability to receive the same event and try
+                               // again on restart.
+                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                                       payment_preimage, update_res);
+                       }
+               } else {
+                       // If we're running during init we cannot update a monitor directly - they probably
+                       // haven't actually been loaded yet. Instead, push the monitor update as a background
+                       // event.
+                       // Note that while it's safe to use `ClosedMonitorUpdateRegeneratedOnStartup` here (the
+                       // channel is already closed), we need to ultimately handle the monitor update
+                       // completion action only after we've completed the monitor update. This is the only
+                       // way to guarantee the update *will* be regenerated on startup (otherwise, if this came
+                       // from a forwarded HTLC, the downstream preimage may be deleted before we claim
+                       // upstream). Thus, we need to transition to some new `BackgroundEvent` type which will
+                       // complete the monitor update completion action from `completion_action`.
+                       self.pending_background_events.lock().unwrap().push(
+                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
+                                       prev_hop.outpoint, preimage_update,
+                               )));
                }
                // Note that we do process the completion action here. This totally could be a
                // duplicate claim, but we have no way of knowing without interrogating the
@@ -4784,6 +4847,8 @@ where
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+                               debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
+                                       "We don't support claim_htlc claims during startup - monitors may not be available yet");
                                self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
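The debug_assert above relies on a flag that flips once startup background events have been replayed. A minimal sketch of that gating pattern, using hypothetical names:

    use std::sync::atomic::{AtomicBool, Ordering};

    struct StartupGate { processed: AtomicBool }

    impl StartupGate {
        fn new() -> Self { StartupGate { processed: AtomicBool::new(false) } }
        // Flipped once startup background events have been replayed.
        fn mark_processed(&self) { self.processed.store(true, Ordering::Release); }
        // Mirrors the debug_assert above: direct claims require loaded monitors.
        fn assert_ready(&self) {
            debug_assert!(self.processed.load(Ordering::Acquire),
                "claims during startup are unsupported - monitors may not be available yet");
        }
    }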
@@ -5530,7 +5595,7 @@ where
                                                _ => pending_forward_info
                                        }
                                };
-                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -5703,22 +5768,27 @@ where
                }
        }
 
-       // We only want to push a PendingHTLCsForwardable event if no others are queued.
        fn push_pending_forwards_ev(&self) {
                let mut pending_events = self.pending_events.lock().unwrap();
-               let forward_ev_exists = pending_events.iter()
-                       .find(|(ev, _)| if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false })
-                       .is_some();
-               if !forward_ev_exists {
-                       pending_events.push_back((events::Event::PendingHTLCsForwardable {
-                               time_forwardable:
-                                       Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
+               let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
+               let num_forward_events = pending_events.iter().filter(|(ev, _)|
+                       if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }
+               ).count();
+               // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
+               // events is done in batches and they are not removed until we're done processing each
+               // batch. Since handling a `PendingHTLCsForwardable` event will call back into the
+               // `ChannelManager`, we'll still see the original forwarding event as not yet removed.
+               // Phantom payments need one additional forwarding event before being claimed so that they
+               // look real by taking the usual forwarding delay.
+               if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
+                       pending_events.push_back((Event::PendingHTLCsForwardable {
+                               time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
                        }, None));
                }
        }
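The push condition above can be read as a standalone predicate; a sketch restating it with the same inputs:

    fn should_push_forward_event(is_processing_events: bool, num_forward_events: usize) -> bool {
        // Outside of event processing, push only when no forwardable event is queued.
        // During processing, the in-flight batch has not been removed yet, so one
        // already-queued event is still tolerated.
        (is_processing_events && num_forward_events <= 1) || num_forward_events < 1
    }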
 
        /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
-       /// [`msgs::RevokeAndACK`] should be held for the given channel until some other event
+       /// [`msgs::RevokeAndACK`] should be held for the given channel until some other action
        /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
        /// the [`ChannelMonitorUpdate`] in question.
        fn raa_monitor_updates_held(&self,
@@ -5747,7 +5817,7 @@ where
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        let funding_txo = chan.get().context.get_funding_txo();
-                                       let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+                                       let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
                                        let res = if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
                                                        peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
@@ -6018,7 +6088,7 @@ where
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
-                                                       chan.maybe_free_holding_cell_htlcs(&self.logger);
+                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
                                                if !holding_cell_failed_htlcs.is_empty() {
                                                        failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
                                                }
@@ -6335,7 +6405,7 @@ where
        /// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
        /// [`Event`] being handled) completes, this should be called to restore the channel to normal
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
-       /// making progress and then any blocked [`ChannelMonitorUpdate`]s fly.
+       /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
        fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
                let mut errors = Vec::new();
                loop {
@@ -7366,6 +7436,7 @@ impl Writeable for ChannelDetails {
                        (35, self.inbound_htlc_maximum_msat, option),
                        (37, user_channel_id_high_opt, option),
                        (39, self.feerate_sat_per_1000_weight, option),
+                       (41, self.channel_shutdown_state, option),
                });
                Ok(())
        }
@@ -7403,6 +7474,7 @@ impl Readable for ChannelDetails {
                        (35, inbound_htlc_maximum_msat, option),
                        (37, user_channel_id_high_opt, option),
                        (39, feerate_sat_per_1000_weight, option),
+                       (41, channel_shutdown_state, option),
                });
 
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -7438,6 +7510,7 @@ impl Readable for ChannelDetails {
                        inbound_htlc_minimum_msat,
                        inbound_htlc_maximum_msat,
                        feerate_sat_per_1000_weight,
+                       channel_shutdown_state,
                })
        }
 }
@@ -7988,6 +8061,14 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
        }
 }
 
+impl_writeable_tlv_based_enum!(ChannelShutdownState,
+       (0, NotShuttingDown) => {},
+       (2, ShutdownInitiated) => {},
+       (4, ResolvingHTLCs) => {},
+       (6, NegotiatingClosingFee) => {},
+       (8, ShutdownComplete) => {}, ;
+);
+
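Because the new TLV entry uses odd type 41, `ChannelDetails` serialized by older versions reads back with the field set to `None`. A hedged sketch of consuming the state from `ChannelDetails::channel_shutdown_state`:

    use lightning::ln::channelmanager::ChannelShutdownState;

    fn describe(state: Option<ChannelShutdownState>) -> &'static str {
        match state {
            // `None` indicates a ChannelDetails serialized before this field existed.
            None | Some(ChannelShutdownState::NotShuttingDown) => "open",
            Some(ChannelShutdownState::ShutdownInitiated) => "shutdown initiated",
            Some(ChannelShutdownState::ResolvingHTLCs) => "resolving HTLCs",
            Some(ChannelShutdownState::NegotiatingClosingFee) => "negotiating closing fee",
            Some(ChannelShutdownState::ShutdownComplete) => "closed",
        }
    }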
 /// Arguments for the creation of a ChannelManager that are not deserialized.
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
@@ -8253,7 +8334,7 @@ where
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
-                               close_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                               close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
                        }
                }
 
@@ -8508,6 +8589,11 @@ where
                // Note that we have to do the above replays before we push new monitor updates.
                pending_background_events.append(&mut close_background_events);
 
+               // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
+               // should ensure we try them again on the inbound edge. We put them here and do so after we
+               // have a fully-constructed `ChannelManager` at the end.
+               let mut pending_claims_to_replay = Vec::new();
+
                {
                        // If we're tracking pending payments, ensure we haven't lost any by looking at the
                        // ChannelMonitor data for any channels for which we do not have authorative state
@@ -8518,7 +8604,8 @@ where
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
                        for (_, monitor) in args.channel_monitors.iter() {
-                               if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+                               let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+                               if counterparty_opt.is_none() {
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
                                                        if path.hops.is_empty() {
@@ -8612,6 +8699,33 @@ where
                                                }
                                        }
                                }
+
+                               // Whether the downstream channel was closed or not, try to re-apply any payment
+                               // preimages from it which may be needed in upstream channels for forwarded
+                               // payments.
+                               let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
+                                       .into_iter()
+                                       .filter_map(|(htlc_source, (htlc, preimage_opt))| {
+                                               if let HTLCSource::PreviousHopData(_) = htlc_source {
+                                                       if let Some(payment_preimage) = preimage_opt {
+                                                               Some((htlc_source, payment_preimage, htlc.amount_msat,
+                                                                       // Check if `counterparty_opt.is_none()` to see if the
+                                                                       // downstream chan is closed (because we don't have a
+                                                                       // channel_id -> peer map entry).
+                                                                       counterparty_opt.is_none(),
+                                                                       monitor.get_funding_txo().0.to_channel_id()))
+                                                       } else { None }
+                                               } else {
+                                                       // If it was an outbound payment, we've handled it above - if a preimage
+                                                       // came in and we persisted the `ChannelManager` we either handled it and
+                                                       // are good to go or the channel force-closed - we don't have to handle the
+                                                       // channel still live case here.
+                                                       None
+                                               }
+                                       });
+                               for tuple in outbound_claimed_htlcs_iter {
+                                       pending_claims_to_replay.push(tuple);
+                               }
                        }
                }
 
@@ -8844,7 +8958,6 @@ where
                        pending_events_processor: AtomicBool::new(false),
                        pending_background_events: Mutex::new(pending_background_events),
                        total_consistency_lock: RwLock::new(()),
-                       #[cfg(debug_assertions)]
                        background_events_processed_since_startup: AtomicBool::new(false),
                        persistence_notifier: Notifier::new(),
 
@@ -8863,6 +8976,14 @@ where
                        channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
 
+               for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+                       // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
+                       // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
+                       // channel is closed we just assume that it probably came from an on-chain claim.
+                       channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
+                               downstream_closed, downstream_chan_id);
+               }
+
                //TODO: Broadcast channel update for closed channels, but only after we've made a
                //connection or two.
 
@@ -10024,7 +10145,7 @@ pub mod bench {
        use crate::routing::gossip::NetworkGraph;
        use crate::routing::router::{PaymentParameters, RouteParameters};
        use crate::util::test_utils;
-       use crate::util::config::UserConfig;
+       use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
@@ -10071,6 +10192,7 @@ pub mod bench {
                let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
 
                let mut config: UserConfig = Default::default();
+               config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
                config.channel_handshake_config.minimum_depth = 1;
 
                let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
index 64bd679a94f89cdf32402a2681166028fce44ca7..024690d00daad73da84072674bcc2094d32df149 100644 (file)
@@ -27,7 +27,7 @@ use crate::util::scid_utils;
 use crate::util::test_utils;
 use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
 use crate::util::errors::APIError;
-use crate::util::config::UserConfig;
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 use crate::util::ser::{ReadableArgs, Writeable};
 
 use bitcoin::blockdata::block::{Block, BlockHeader};
@@ -1806,6 +1806,28 @@ macro_rules! get_route_and_payment_hash {
        }}
 }
 
+pub fn check_payment_claimable(
+       event: &Event, expected_payment_hash: PaymentHash, expected_payment_secret: PaymentSecret,
+       expected_recv_value: u64, expected_payment_preimage: Option<PaymentPreimage>,
+       expected_receiver_node_id: PublicKey,
+) {
+       match event {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, .. } => {
+                       assert_eq!(expected_payment_hash, *payment_hash);
+                       assert_eq!(expected_recv_value, *amount_msat);
+                       assert_eq!(expected_receiver_node_id, receiver_node_id.unwrap());
+                       match purpose {
+                               PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+                                       assert_eq!(&expected_payment_preimage, payment_preimage);
+                                       assert_eq!(expected_payment_secret, *payment_secret);
+                               },
+                               _ => {},
+                       }
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
 #[macro_export]
 #[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 macro_rules! expect_payment_claimable {
@@ -1815,22 +1837,8 @@ macro_rules! expect_payment_claimable {
        ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr, $expected_payment_preimage: expr, $expected_receiver_node_id: expr) => {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
-               match events[0] {
-                       $crate::events::Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, .. } => {
-                               assert_eq!($expected_payment_hash, *payment_hash);
-                               assert_eq!($expected_recv_value, amount_msat);
-                               assert_eq!($expected_receiver_node_id, receiver_node_id.unwrap());
-                               match purpose {
-                                       $crate::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
-                                               assert_eq!(&$expected_payment_preimage, payment_preimage);
-                                               assert_eq!($expected_payment_secret, *payment_secret);
-                                       },
-                                       _ => {},
-                               }
-                       },
-                       _ => panic!("Unexpected event"),
-               }
-       }
+               $crate::ln::functional_test_utils::check_payment_claimable(&events[0], $expected_payment_hash, $expected_payment_secret, $expected_recv_value, $expected_payment_preimage, $expected_receiver_node_id)
+       };
 }
 
 #[macro_export]
@@ -1951,6 +1959,16 @@ macro_rules! expect_payment_forwarded {
        }
 }
 
+#[cfg(test)]
+#[macro_export]
+macro_rules! expect_channel_shutdown_state {
+       ($node: expr, $chan_id: expr, $state: path) => {
+               let chan_details = $node.node.list_channels().into_iter().filter(|cd| cd.channel_id == $chan_id).collect::<Vec<ChannelDetails>>();
+               assert_eq!(chan_details.len(), 1);
+               assert_eq!(chan_details[0].channel_shutdown_state, Some($state));
+       }
+}
+
 #[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 pub fn expect_channel_pending_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
        let events = node.node.get_and_clear_pending_events();
@@ -2572,8 +2590,10 @@ pub fn test_default_channel_config() -> UserConfig {
        // It now defaults to 1, so we simply set it to the expected value here.
        default_config.channel_handshake_config.our_htlc_minimum_msat = 1000;
        // When most of our tests were written, we didn't have the notion of a `max_dust_htlc_exposure_msat`,
-       // It now defaults to 5_000_000 msat; to avoid interfering with tests we bump it to 50_000_000 msat.
-       default_config.channel_config.max_dust_htlc_exposure_msat = 50_000_000;
+       // so to avoid interfering with tests we bump it to 50_000_000 msat (assuming the default test
+       // feerate of 253).
+       default_config.channel_config.max_dust_htlc_exposure =
+               MaxDustHTLCExposure::FeeRateMultiplier(50_000_000 / 253);
        default_config
 }
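A quick sanity check of the arithmetic above, assuming the default test feerate of 253 sat/kw:

    #[test]
    fn dust_multiplier_sanity() {
        let multiplier = 50_000_000u64 / 253; // = 197_628
        assert_eq!(multiplier * 253, 49_999_884); // ~50_000_000 msat at 253 sat/kw
    }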
 
index 7d8d52427f251e7ce16cba67bf948889c35ccd11..271ff541a84981733090b30600956886c2845d40 100644 (file)
@@ -35,7 +35,7 @@ use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::string::UntrustedString;
-use crate::util::config::UserConfig;
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::blockdata::script::{Builder, Script};
@@ -9515,7 +9515,7 @@ enum ExposureEvent {
        AtUpdateFeeOutbound,
 }
 
-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
        // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
        // policy.
        //
@@ -9530,7 +9530,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let mut config = test_default_channel_config();
-       config.channel_config.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+       config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
+               // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
+               // to get roughly the same initial value as the default setting when this test was
+               // originally written.
+               MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
+       } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -9574,20 +9579,21 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        let (mut route, payment_hash, _, payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
 
-       let dust_buffer_feerate = {
+       let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               chan.context.get_dust_buffer_feerate(None) as u64
+               (chan.context.get_dust_buffer_feerate(None) as u64,
+               chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+       let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
 
        let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+       let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
 
        let dust_htlc_on_counterparty_tx: u64 = 4;
-       let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+       let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
 
        if on_holder_tx {
                if dust_outbound_balance {
@@ -9639,7 +9645,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                                ), true, APIError::ChannelUnavailable { .. }, {});
                }
        } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 });
+               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
                nodes[1].node.send_payment_with_route(&route, payment_hash,
                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
                check_added_monitors!(nodes[1], 1);
@@ -9652,18 +9658,24 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                        // Outbound dust balance: 6399 sats
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
                } else {
                        // Outbound dust balance: 5200 sats
                        nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
                                format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 1,
-                                       config.channel_config.max_dust_htlc_exposure_msat), 1);
+                                       dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
+                                       max_dust_htlc_exposure_msat), 1);
                }
        } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
                route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
-               nodes[0].node.send_payment_with_route(&route, payment_hash,
-                       RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+               // For the multiplier dust exposure limit, since it scales with feerate,
+               // we need to add a lot of HTLCs that will become dust at the new feerate
+               // to cross the threshold.
+               for _ in 0..20 {
+                       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
+                       nodes[0].node.send_payment_with_route(&route, payment_hash,
+                               RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+               }
                {
                        let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                        *feerate_lock = *feerate_lock * 10;
@@ -9678,20 +9690,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        added_monitors.clear();
 }
 
+fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+}
+
 #[test]
 fn test_max_dust_htlc_exposure() {
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
+       do_test_max_dust_htlc_exposure_by_threshold_type(false);
+       do_test_max_dust_htlc_exposure_by_threshold_type(true);
 }
 
 #[test]
index 1aa3420caf503f3704e502abe248fa869c7feded..e5faedfe7fc39c958cc52f1c40fbcbc09bcebf27 100644 (file)
@@ -26,7 +26,7 @@ use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
 use crate::ln::wire::Encode;
 use crate::util::ser::{Writeable, Writer};
 use crate::util::test_utils;
-use crate::util::config::{UserConfig, ChannelConfig};
+use crate::util::config::{UserConfig, ChannelConfig, MaxDustHTLCExposure};
 use crate::util::errors::APIError;
 
 use bitcoin::hash_types::BlockHash;
@@ -671,6 +671,7 @@ fn do_test_onion_failure_stale_channel_update(announced_channel: bool) {
        config.channel_handshake_config.announced_channel = announced_channel;
        config.channel_handshake_limits.force_announced_channel_preference = false;
        config.accept_forwards_to_priv_channels = !announced_channel;
+       config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let persister;
        let chain_monitor;
@@ -1371,10 +1372,19 @@ fn test_phantom_failure_too_low_recv_amt() {
 
 #[test]
 fn test_phantom_dust_exposure_failure() {
+       do_test_phantom_dust_exposure_failure(false);
+       do_test_phantom_dust_exposure_failure(true);
+}
+
+fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) {
        // Set the max dust exposure to the dust limit.
        let max_dust_exposure = 546;
        let mut receiver_config = UserConfig::default();
-       receiver_config.channel_config.max_dust_htlc_exposure_msat = max_dust_exposure;
+       // Default test fee estimator rate is 253 sat/kw, so to set the max dust exposure to roughly
+       // the dust limit, we need to set the multiplier to 2 (253 * 2 = 506 msat).
+       receiver_config.channel_config.max_dust_htlc_exposure =
+               if multiplier_dust_limit { MaxDustHTLCExposure::FeeRateMultiplier(2) }
+               else { MaxDustHTLCExposure::FixedLimitMsat(max_dust_exposure) };
        receiver_config.channel_handshake_config.announced_channel = true;
 
        let chanmon_cfgs = create_chanmon_cfgs(2);
index 8d3fbf0ec64667a756c48777fced8f53a2997b5b..2c8f824eab61512651e2a59e0f40355c8549f522 100644 (file)
@@ -21,7 +21,7 @@ use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
 use crate::ln::wire::Encode;
-use crate::util::config::UserConfig;
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 use crate::util::ser::Writeable;
 use crate::util::test_utils;
 
@@ -141,10 +141,12 @@ fn do_test_1_conf_open(connect_style: ConnectStyle) {
        alice_config.channel_handshake_config.minimum_depth = 1;
        alice_config.channel_handshake_config.announced_channel = true;
        alice_config.channel_handshake_limits.force_announced_channel_preference = false;
+       alice_config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
        let mut bob_config = UserConfig::default();
        bob_config.channel_handshake_config.minimum_depth = 1;
        bob_config.channel_handshake_config.announced_channel = true;
        bob_config.channel_handshake_limits.force_announced_channel_preference = false;
+       bob_config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
index a2ec37a8abbaf113afe57e5bbcaa1fdafcbc40a0..3aa48c1b45d2f8f49b17746db0aa7c33d0d2fca1 100644 (file)
@@ -12,7 +12,7 @@
 use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
 use crate::routing::router::{PaymentParameters, get_route};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
@@ -67,6 +67,169 @@ fn pre_funding_lock_shutdown_test() {
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
 
+#[test]
+fn expect_channel_shutdown_state() {
+       // Test the channel shutdown state transitions through a full cooperative close
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+       nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+       let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+
+       // node1 goes into NegotiatingClosingFee since there are no HTLCs in flight; note that this
+       // doesn't mean that node1 has sent/received its closing_signed message
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+
+       let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+
+       let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+       let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+       let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+       let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+       assert!(node_1_none.is_none());
+
+       assert!(nodes[0].node.list_channels().is_empty());
+       assert!(nodes[1].node.list_channels().is_empty());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+}
+
+#[test]
+fn expect_channel_shutdown_state_with_htlc() {
+       // Test sending a shutdown with outstanding updates pending.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let _chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+       nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+       let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ShutdownInitiated);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+
+       let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Claim Funds on Node2
+       nodes[2].node.claim_funds(payment_preimage_0);
+       check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
+
+       // Fulfil HTLCs on node1 and node0
+       let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       assert!(updates.update_add_htlcs.is_empty());
+       assert!(updates.update_fail_htlcs.is_empty());
+       assert!(updates.update_fail_malformed_htlcs.is_empty());
+       assert!(updates.update_fee.is_none());
+       assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
+       check_added_monitors!(nodes[1], 1);
+       let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+       // Still in "ResolvingHTLCs" on chan1 after the HTLC was removed on chan2
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::ResolvingHTLCs);
+
+       assert!(updates_2.update_add_htlcs.is_empty());
+       assert!(updates_2.update_fail_htlcs.is_empty());
+       assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+       assert!(updates_2.update_fee.is_none());
+       assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+       expect_payment_sent!(nodes[0], payment_preimage_0);
+
+       // All HTLCs removed, chan1 advances to NegotiatingClosingFee
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NegotiatingClosingFee);
+
+       // ClosingSigned negotiation process
+       let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+       let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+       let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+       let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+       assert!(node_1_none.is_none());
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+
+       // Shutdown removes the ChannelDetails entry, so testing the ShutdownComplete state is unnecessary
+       assert!(nodes[0].node.list_channels().is_empty());
+}
+
+#[test]
+fn expect_channel_shutdown_state_with_force_closure() {
+       // Test shutdown state handling when the channel is force-closed rather than cooperatively closed
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+       expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
+
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       check_closed_broadcast!(nodes[1], true);
+       check_added_monitors!(nodes[1], 1);
+
+       expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
+       assert!(nodes[1].node.list_channels().is_empty());
+
+       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(node_txn.len(), 1);
+       check_spends!(node_txn[0], chan_1.3);
+       mine_transaction(&nodes[0], &node_txn[0]);
+       check_added_monitors!(nodes[0], 1);
+
+       assert!(nodes[0].node.list_channels().is_empty());
+       assert!(nodes[1].node.list_channels().is_empty());
+       check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+}
+
 #[test]
 fn updates_shutdown_wait() {
        // Test sending a shutdown with outstanding updates pending
index 644772b81498bdcbc94251f067b4740176531471..b9b70e0a03165e49f71f7f8c9bf185fd0388d7a2 100644 (file)
@@ -10,7 +10,7 @@
 //! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers
 
 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
-use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::{PublicKey, Verification};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1;
 
@@ -409,6 +409,29 @@ macro_rules! get_pubkey_from_node_id {
        }
 }
 
+/// Verifies the signature of a [`NodeAnnouncement`].
+///
+/// Returns an error if it is invalid.
+pub fn verify_node_announcement<C: Verification>(msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>) -> Result<(), LightningError> {
+       let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
+       secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement");
+
+       Ok(())
+}
+
+/// Verifies all signatures included in a [`ChannelAnnouncement`].
+///
+/// Returns an error if one of the signatures is invalid.
+pub fn verify_channel_announcement<C: Verification>(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>) -> Result<(), LightningError> {
+       let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
+       secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement");
+       secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement");
+       secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement");
+       secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement");
+
+       Ok(())
+}
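These helpers make signature verification possible without going through a `NetworkGraph`; a sketch, where `announcement` is a hypothetical `msgs::NodeAnnouncement` obtained out-of-band:

    use bitcoin::secp256k1::Secp256k1;
    use lightning::ln::msgs::NodeAnnouncement;
    use lightning::routing::gossip::verify_node_announcement;

    fn check_announcement(announcement: &NodeAnnouncement) -> bool {
        // A verification-only context is sufficient for signature checks.
        let secp_ctx = Secp256k1::verification_only();
        verify_node_announcement(announcement, &secp_ctx).is_ok()
    }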
+
 impl<G: Deref<Target=NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler for P2PGossipSync<G, U, L>
 where U::Target: UtxoLookup, L::Target: Logger
 {
@@ -1401,8 +1424,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
        /// RoutingMessageHandler implementation to call it indirectly. This may be useful to accept
        /// routing messages from a source using a protocol other than the lightning P2P protocol.
        pub fn update_node_from_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<(), LightningError> {
-               let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
-               secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement");
+               verify_node_announcement(msg, &self.secp_ctx)?;
                self.update_node_from_announcement_intern(&msg.contents, Some(&msg))
        }
 
@@ -1465,11 +1487,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
        where
                U::Target: UtxoLookup,
        {
-               let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
-               secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement");
-               secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement");
-               secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement");
-               secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement"), "channel_announcement");
+               verify_channel_announcement(msg, &self.secp_ctx)?;
                self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup)
        }
 
index 8c00df99a7eed96cbb3fb4633b2622c53532dfa6..d3539579c50ae16d8a8d92eb806af6364243327d 100644 (file)
@@ -204,6 +204,15 @@ impl InFlightHtlcs {
                }
        }
 
+       /// Adds a known HTLC given the node IDs of the HTLC source and target, the short channel
+       /// id, and the amount in flight in millisatoshis.
+       pub fn add_inflight_htlc(&mut self, source: &NodeId, target: &NodeId, channel_scid: u64, used_msat: u64) {
+               self.0
+                       .entry((channel_scid, source < target))
+                       .and_modify(|used_liquidity_msat| *used_liquidity_msat += used_msat)
+                       .or_insert(used_msat);
+       }
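A usage sketch for the new method, assuming the existing `InFlightHtlcs::new()` constructor and hypothetical `source`/`target` node IDs; amounts accumulate per (scid, direction) entry:

    use lightning::routing::gossip::NodeId;
    use lightning::routing::router::InFlightHtlcs;

    fn track(source: &NodeId, target: &NodeId) {
        let mut inflight = InFlightHtlcs::new();
        inflight.add_inflight_htlc(source, target, 42, 10_000);
        inflight.add_inflight_htlc(source, target, 42, 5_000); // accumulates on the same entry
        assert_eq!(inflight.used_liquidity_msat(source, target, 42), Some(15_000));
    }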
+
        /// Returns liquidity in msat given the public key of the HTLC source, target, and short channel
        /// id.
        pub fn used_liquidity_msat(&self, source: &NodeId, target: &NodeId, channel_scid: u64) -> Option<u64> {
@@ -2686,7 +2695,8 @@ mod tests {
                        inbound_htlc_minimum_msat: None,
                        inbound_htlc_maximum_msat: None,
                        config: None,
-                       feerate_sat_per_1000_weight: None
+                       feerate_sat_per_1000_weight: None,
+                       channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
                }
        }
 
@@ -6757,6 +6767,7 @@ pub(crate) mod bench_utils {
                        inbound_htlc_maximum_msat: None,
                        config: None,
                        feerate_sat_per_1000_weight: None,
+                       channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
                }
        }
 
index 5c8d5b6c554fc11ad4f89500a69f8f600812ad66..267774481b4b98df545112c5f7468721ebb90de6 100644 (file)
@@ -315,6 +315,55 @@ impl Default for ChannelHandshakeLimits {
        }
 }
 
+/// Options for how to set the max dust HTLC exposure allowed on a channel. See
+/// [`ChannelConfig::max_dust_htlc_exposure`] for details.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum MaxDustHTLCExposure {
+       /// This sets a fixed limit on the total dust exposure in millisatoshis. Setting this too low
+       /// may prevent the sending or receipt of low-value HTLCs on high-traffic nodes; however, this
+       /// limit is very important to prevent miners from stealing large amounts of funds held in
+       /// dust HTLCs through [fee griefing
+       /// attacks](https://lists.linuxfoundation.org/pipermail/lightning-dev/2020-May/002714.html).
+       ///
+       /// Note that if the feerate increases significantly, without a manual increase
+       /// to this maximum the channel may be unable to send/receive HTLCs between the maximum dust
+       /// exposure and the new minimum value for HTLCs to be economically viable to claim.
+       FixedLimitMsat(u64),
+       /// This sets a multiplier on the estimated high priority feerate (sats/KW, as obtained from
+       /// [`FeeEstimator`]) to determine the maximum allowed dust exposure. If this variant is used
+       /// then the maximum dust exposure in millisatoshis is calculated as:
+       /// `high_priority_feerate_per_kw * value`. For example, with our default value
+       /// `FeeRateMultiplier(5000)`:
+       ///
+       /// - For the minimum fee rate of 1 sat/vByte (250 sat/KW, although the minimum
+       /// defaults to 253 sats/KW for rounding, see [`FeeEstimator`]), the max dust exposure would
+       /// be 253 * 5000 = 1,265,000 msats.
+       /// - For a fee rate of 30 sat/vByte (7500 sat/KW), the max dust exposure would be
+       /// 7500 * 5000 = 37,500,000 msats.
+       ///
+       /// This allows the maximum dust exposure to automatically scale with fee rate changes.
+       ///
+       /// Note that if you're using a third-party fee estimator, this may leave you more exposed to a
+       /// fee griefing attack, where your fee estimator may purposely overestimate the fee rate,
+       /// causing you to accept more dust HTLCs than you would otherwise.
+       ///
+       /// This variant is primarily meant to serve pre-anchor channels, as HTLC fees being included
+       /// on HTLC outputs means your channel may be subject to more dust exposure in the event of
+       /// increases in fee rate.
+       ///
+       /// # Backwards Compatibility
+       /// This variant only became available in LDK 0.0.116, so if you downgrade to a prior version,
+       /// this will default to a [`Self::FixedLimitMsat`] of 5,000,000 msat.
+       ///
+       /// [`FeeEstimator`]: crate::chain::chaininterface::FeeEstimator
+       FeeRateMultiplier(u64),
+}
+
+impl_writeable_tlv_based_enum!(MaxDustHTLCExposure, ;
+       (1, FixedLimitMsat),
+       (3, FeeRateMultiplier),
+);
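A configuration sketch using the new variant. With integer division, 5_000_000 / 253 = 19_762, so at the 253 sat/kw floor the allowed exposure is 19_762 * 253 = 4_999_786 msat, roughly the old fixed 5,000,000 msat default:

    use lightning::util::config::{MaxDustHTLCExposure, UserConfig};

    fn configure_dust_exposure() -> UserConfig {
        let mut config = UserConfig::default();
        // Scale dust exposure with the estimated feerate instead of a fixed cap.
        config.channel_config.max_dust_htlc_exposure =
            MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
        config
    }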
+
 /// Options which apply on a per-channel basis and may change at runtime or based on negotiation
 /// with our counterparty.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
@@ -372,15 +421,15 @@ pub struct ChannelConfig {
        /// channel negotiated throughout the channel open process, along with the fees required to have
        /// a broadcastable HTLC spending transaction. When a channel supports anchor outputs
        /// (specifically the zero fee HTLC transaction variant), this threshold no longer takes into
-       /// account the HTLC transaction fee as it is zero.
+       /// account the HTLC transaction fee as it is zero. Because of this, you may want to set this
+       /// value to a fixed limit for channels using anchor outputs, while the fee rate multiplier
+       /// variant is primarily intended for use with pre-anchor channels.
        ///
-       /// This limit is applied for sent, forwarded, and received HTLCs and limits the total
-       /// exposure across all three types per-channel. Setting this too low may prevent the
-       /// sending or receipt of low-value HTLCs on high-traffic nodes, and this limit is very
-       /// important to prevent stealing of dust HTLCs by miners.
+       /// The selected limit is applied for sent, forwarded, and received HTLCs and limits the total
+       /// exposure across all three types per-channel.
        ///
-       /// Default value: 5_000_000 msat.
-       pub max_dust_htlc_exposure_msat: u64,
+       /// Default value: [`MaxDustHTLCExposure::FeeRateMultiplier`] with a multiplier of 5000.
+       pub max_dust_htlc_exposure: MaxDustHTLCExposure,
        /// The additional fee we're willing to pay to avoid waiting for the counterparty's
        /// `to_self_delay` to reclaim funds.
        ///
@@ -451,7 +500,7 @@ impl ChannelConfig {
                        self.cltv_expiry_delta = cltv_expiry_delta;
                }
                if let Some(max_dust_htlc_exposure_msat) = update.max_dust_htlc_exposure_msat {
-                       self.max_dust_htlc_exposure_msat = max_dust_htlc_exposure_msat;
+                       self.max_dust_htlc_exposure = max_dust_htlc_exposure_msat;
                }
                if let Some(force_close_avoidance_max_fee_satoshis) = update.force_close_avoidance_max_fee_satoshis {
                        self.force_close_avoidance_max_fee_satoshis = force_close_avoidance_max_fee_satoshis;
@@ -466,24 +515,67 @@ impl Default for ChannelConfig {
                        forwarding_fee_proportional_millionths: 0,
                        forwarding_fee_base_msat: 1000,
                        cltv_expiry_delta: 6 * 12, // 6 blocks/hour * 12 hours
-                       max_dust_htlc_exposure_msat: 5_000_000,
+                       max_dust_htlc_exposure: MaxDustHTLCExposure::FeeRateMultiplier(5000),
                        force_close_avoidance_max_fee_satoshis: 1000,
                        accept_underpaying_htlcs: false,
                }
        }
 }
 
-impl_writeable_tlv_based!(ChannelConfig, {
-       (0, forwarding_fee_proportional_millionths, required),
-       (1, accept_underpaying_htlcs, (default_value, false)),
-       (2, forwarding_fee_base_msat, required),
-       (4, cltv_expiry_delta, required),
-       (6, max_dust_htlc_exposure_msat, required),
-       // ChannelConfig serialized this field with a required type of 8 prior to the introduction of
-       // LegacyChannelConfig. To make sure that serialization is not compatible with this one, we use
-       // the next required type of 10, which if seen by the old serialization will always fail.
-       (10, force_close_avoidance_max_fee_satoshis, required),
-});
+impl crate::util::ser::Writeable for ChannelConfig {
+       fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
+               let max_dust_htlc_exposure_msat_fixed_limit = match self.max_dust_htlc_exposure {
+                       MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+                       MaxDustHTLCExposure::FeeRateMultiplier(_) => 5_000_000,
+               };
+               write_tlv_fields!(writer, {
+                       (0, self.forwarding_fee_proportional_millionths, required),
+                       (1, self.accept_underpaying_htlcs, (default_value, false)),
+                       (2, self.forwarding_fee_base_msat, required),
+                       (3, self.max_dust_htlc_exposure, required),
+                       (4, self.cltv_expiry_delta, required),
+                       (6, max_dust_htlc_exposure_msat_fixed_limit, required),
+                       // ChannelConfig serialized this field with a required type of 8 prior to the introduction of
+                       // LegacyChannelConfig. To make sure that serialization is not compatible with this one, we use
+                       // the next required type of 10, which, if seen by the old deserialization logic, will always fail.
+                       (10, self.force_close_avoidance_max_fee_satoshis, required),
+               });
+               Ok(())
+       }
+}
+
+impl crate::util::ser::Readable for ChannelConfig {
+       fn read<R: crate::io::Read>(reader: &mut R) -> Result<Self, crate::ln::msgs::DecodeError> {
+               let mut forwarding_fee_proportional_millionths = 0;
+               let mut accept_underpaying_htlcs = false;
+               let mut forwarding_fee_base_msat = 1000;
+               let mut cltv_expiry_delta = 6 * 12;
+               let mut max_dust_htlc_exposure_msat = None;
+               let mut max_dust_htlc_exposure_enum = None;
+               let mut force_close_avoidance_max_fee_satoshis = 1000;
+               read_tlv_fields!(reader, {
+                       (0, forwarding_fee_proportional_millionths, required),
+                       (1, accept_underpaying_htlcs, (default_value, false)),
+                       (2, forwarding_fee_base_msat, required),
+                       (3, max_dust_htlc_exposure_enum, option),
+                       (4, cltv_expiry_delta, required),
+                       // Has always been written, but became optionally read in 0.0.116
+                       (6, max_dust_htlc_exposure_msat, option),
+                       (10, force_close_avoidance_max_fee_satoshis, required),
+               });
+               let max_dust_htlc_fixed_limit = max_dust_htlc_exposure_msat.unwrap_or(5_000_000);
+               let max_dust_htlc_exposure = max_dust_htlc_exposure_enum
+                       .unwrap_or(MaxDustHTLCExposure::FixedLimitMsat(max_dust_htlc_fixed_limit));
+               Ok(Self {
+                       forwarding_fee_proportional_millionths,
+                       accept_underpaying_htlcs,
+                       forwarding_fee_base_msat,
+                       cltv_expiry_delta,
+                       max_dust_htlc_exposure,
+                       force_close_avoidance_max_fee_satoshis,
+               })
+       }
+}
 
 /// A parallel struct to [`ChannelConfig`] to define partial updates.
 #[allow(missing_docs)]
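The serialization dance above is the backwards-compatibility core of this change: the writer emits both the new enum at odd TLV type 3 and a projected fixed limit at the legacy type 6, so pre-0.0.116 readers (which only know type 6 and skip unknown odd types) still find a sane value, while 0.0.116+ readers prefer type 3. A condensed sketch of the read-side fallback, using the types defined above:

    fn resolve_dust_exposure(
        new_enum: Option<MaxDustHTLCExposure>, legacy_msat: Option<u64>,
    ) -> MaxDustHTLCExposure {
        // Prefer the enum written by 0.0.116+; otherwise treat the legacy msat
        // field as a fixed limit, defaulting to 5_000_000 if it too is absent.
        new_enum.unwrap_or(MaxDustHTLCExposure::FixedLimitMsat(legacy_msat.unwrap_or(5_000_000)))
    }
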
@@ -491,7 +583,7 @@ pub struct ChannelConfigUpdate {
        pub forwarding_fee_proportional_millionths: Option<u32>,
        pub forwarding_fee_base_msat: Option<u32>,
        pub cltv_expiry_delta: Option<u16>,
-       pub max_dust_htlc_exposure_msat: Option<u64>,
+       pub max_dust_htlc_exposure_msat: Option<MaxDustHTLCExposure>,
        pub force_close_avoidance_max_fee_satoshis: Option<u64>,
 }
 
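As a usage sketch (assuming crate-internal access to the `apply` method shown in an earlier hunk, which consumes these optional fields), a partial update that only changes the dust exposure would leave every other field as `None`:

    let mut config = ChannelConfig::default();
    let update = ChannelConfigUpdate {
        forwarding_fee_proportional_millionths: None,
        forwarding_fee_base_msat: None,
        cltv_expiry_delta: None,
        max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(5_000_000)),
        force_close_avoidance_max_fee_satoshis: None,
    };
    config.apply(&update);
    assert_eq!(config.max_dust_htlc_exposure, MaxDustHTLCExposure::FixedLimitMsat(5_000_000));
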
@@ -513,7 +605,7 @@ impl From<ChannelConfig> for ChannelConfigUpdate {
                        forwarding_fee_proportional_millionths: Some(config.forwarding_fee_proportional_millionths),
                        forwarding_fee_base_msat: Some(config.forwarding_fee_base_msat),
                        cltv_expiry_delta: Some(config.cltv_expiry_delta),
-                       max_dust_htlc_exposure_msat: Some(config.max_dust_htlc_exposure_msat),
+                       max_dust_htlc_exposure_msat: Some(config.max_dust_htlc_exposure),
                        force_close_avoidance_max_fee_satoshis: Some(config.force_close_avoidance_max_fee_satoshis),
                }
        }
@@ -546,12 +638,17 @@ impl Default for LegacyChannelConfig {
 
 impl crate::util::ser::Writeable for LegacyChannelConfig {
        fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
+               let max_dust_htlc_exposure_msat_fixed_limit = match self.options.max_dust_htlc_exposure {
+                       MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+                       MaxDustHTLCExposure::FeeRateMultiplier(_) => 5_000_000,
+               };
                write_tlv_fields!(writer, {
                        (0, self.options.forwarding_fee_proportional_millionths, required),
-                       (1, self.options.max_dust_htlc_exposure_msat, (default_value, 5_000_000)),
+                       (1, max_dust_htlc_exposure_msat_fixed_limit, required),
                        (2, self.options.cltv_expiry_delta, required),
                        (3, self.options.force_close_avoidance_max_fee_satoshis, (default_value, 1000)),
                        (4, self.announced_channel, required),
+                       (5, self.options.max_dust_htlc_exposure, required),
                        (6, self.commit_upfront_shutdown_pubkey, required),
                        (8, self.options.forwarding_fee_base_msat, required),
                });
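`LegacyChannelConfig` uses the same projection as `ChannelConfig`: a `FeeRateMultiplier` cannot be expressed in the legacy format, so the legacy field (type 1 here) receives the historical default while the enum rides along at the new odd type 5. The projection, extracted as a sketch mirroring the match above:

    fn legacy_fixed_limit_msat(exposure: MaxDustHTLCExposure) -> u64 {
        match exposure {
            // A fixed limit round-trips unchanged for old readers.
            MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
            // A multiplier has no legacy representation; fall back to the
            // historical default of 5_000_000 msat.
            MaxDustHTLCExposure::FeeRateMultiplier(_) => 5_000_000,
        }
    }
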
@@ -562,25 +659,32 @@ impl crate::util::ser::Writeable for LegacyChannelConfig {
 impl crate::util::ser::Readable for LegacyChannelConfig {
        fn read<R: crate::io::Read>(reader: &mut R) -> Result<Self, crate::ln::msgs::DecodeError> {
                let mut forwarding_fee_proportional_millionths = 0;
-               let mut max_dust_htlc_exposure_msat = 5_000_000;
+               let mut max_dust_htlc_exposure_msat_fixed_limit = None;
                let mut cltv_expiry_delta = 0;
                let mut force_close_avoidance_max_fee_satoshis = 1000;
                let mut announced_channel = false;
                let mut commit_upfront_shutdown_pubkey = false;
                let mut forwarding_fee_base_msat = 0;
+               let mut max_dust_htlc_exposure_enum = None;
                read_tlv_fields!(reader, {
                        (0, forwarding_fee_proportional_millionths, required),
-                       (1, max_dust_htlc_exposure_msat, (default_value, 5_000_000u64)),
+                       // Has always been written, but became optionally read in 0.0.116
+                       (1, max_dust_htlc_exposure_msat_fixed_limit, option),
                        (2, cltv_expiry_delta, required),
                        (3, force_close_avoidance_max_fee_satoshis, (default_value, 1000u64)),
                        (4, announced_channel, required),
+                       (5, max_dust_htlc_exposure_enum, option),
                        (6, commit_upfront_shutdown_pubkey, required),
                        (8, forwarding_fee_base_msat, required),
                });
+               let max_dust_htlc_exposure_msat_fixed_limit =
+                       max_dust_htlc_exposure_msat_fixed_limit.unwrap_or(5_000_000);
+               let max_dust_htlc_exposure = max_dust_htlc_exposure_enum
+                       .unwrap_or(MaxDustHTLCExposure::FixedLimitMsat(max_dust_htlc_exposure_msat_fixed_limit));
                Ok(Self {
                        options: ChannelConfig {
                                forwarding_fee_proportional_millionths,
-                               max_dust_htlc_exposure_msat,
+                               max_dust_htlc_exposure,
                                cltv_expiry_delta,
                                force_close_avoidance_max_fee_satoshis,
                                forwarding_fee_base_msat,
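A test-style round-trip sketch (assuming LDK's `Writeable`/`Readable` traits as used in the impls above) showing that a `FeeRateMultiplier` survives serialization on 0.0.116+:

    use lightning::util::ser::{Readable, Writeable};

    let config = ChannelConfig {
        max_dust_htlc_exposure: MaxDustHTLCExposure::FeeRateMultiplier(5000),
        ..Default::default()
    };
    let mut buf = Vec::new();
    config.write(&mut buf).unwrap();
    let read_back: ChannelConfig = Readable::read(&mut &buf[..]).unwrap();
    assert_eq!(read_back.max_dust_htlc_exposure, config.max_dust_htlc_exposure);
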
index 710085e2bbd5fa91cff4fea09d3065eda919bb50..1744b923d5e92589eee665c5a1ef1e263b6cfd8b 100644 (file)
@@ -997,7 +997,7 @@ macro_rules! impl_writeable_tlv_based_enum {
                                                f()
                                        }),*
                                        $($tuple_variant_id => {
-                                               Ok($st::$tuple_variant_name(Readable::read(reader)?))
+                                               Ok($st::$tuple_variant_name($crate::util::ser::Readable::read(reader)?))
                                        }),*
                                        _ => {
                                                Err($crate::ln::msgs::DecodeError::UnknownRequiredFeature)
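The `ser_macros.rs` hunk qualifies `Readable::read` with its full `$crate::` path so the expansion compiles even when the macro's call site has not imported `Readable` itself. A generic illustration of the pattern (hypothetical trait and macro names, not LDK API):

    pub trait Decodable: Sized {
        fn decode(bytes: &[u8]) -> Self;
    }

    #[macro_export]
    macro_rules! decode_as {
        ($ty:ty, $bytes:expr) => {
            // Fully qualified via $crate, so callers need not `use` Decodable.
            <$ty as $crate::Decodable>::decode($bytes)
        };
    }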