Merge pull request #2100 from douglaz/docs_fixes
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 21 Mar 2023 18:44:23 +0000 (18:44 +0000)
committer GitHub <noreply@github.com>
Tue, 21 Mar 2023 18:44:23 +0000 (18:44 +0000)
Docs improvements

16 files changed:
.github/workflows/build.yml
ci/ci-tests.sh [new file with mode: 0755]
fuzz/src/router.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/convert.rs
lightning-invoice/src/utils.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/onion_message/functional_tests.rs
lightning/src/routing/router.rs
lightning/src/util/test_utils.rs

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 100441d2be200cb85e6e70b296691851f9edb818..881e4f24749f36cea71f75f7fa2921c5190263ab 100644 (file)
@@ -7,70 +7,21 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        platform: [ ubuntu-latest ]
-        toolchain: [ stable,
-                     beta,
-                     # 1.41.1 is MSRV for Rust-Lightning, lightning-invoice, and lightning-persister
-                     1.41.1,
-                     # 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, lightning-background-processor
-                     1.45.2,
-                     # 1.47.0 will be the MSRV for no-std builds using hashbrown once core2 is updated
-                     1.47.0]
+        platform: [ ubuntu-latest, windows-latest, macos-latest ]
+        toolchain: [ stable, beta ]
         include:
           - toolchain: stable
-            build-net-tokio: true
-            build-no-std: true
-            build-futures: true
-            build-tx-sync: true
+            platform: ubuntu-latest
             coverage: true
-          - toolchain: stable
-            platform: macos-latest
-            build-net-tokio: true
-            build-no-std: true
-            build-futures: true
-            build-tx-sync: true
-          - toolchain: stable
-            test-custom-message: true
-          - toolchain: beta
-            platform: macos-latest
-            build-net-tokio: true
-            build-no-std: true
-            build-futures: true
-            build-tx-sync: true
-          - toolchain: stable
+          # 1.48.0 is the MSRV for all crates except lightning-transaction-sync and Win/Mac
+          - toolchain: 1.48.0
+            platform: ubuntu-latest
+          # Windows requires 1.49.0 because that's the MSRV for supported Tokio
+          - toolchain: 1.49.0
             platform: windows-latest
-            build-net-tokio: true
-            build-no-std: true
-            build-futures: true
-            build-tx-sync: false
-          - toolchain: beta
-            platform: windows-latest
-            build-net-tokio: true
-            build-no-std: true
-            build-futures: true
-            build-tx-sync: false
-          - toolchain: beta
-            build-net-tokio: true
-            build-no-std: true
-            build-futures: true
-            build-tx-sync: true
-          - toolchain: beta
-            test-custom-message: true
-          - toolchain: 1.41.1
-            build-no-std: false
-            test-log-variants: true
-            build-futures: false
-            build-tx-sync: false
-          - toolchain: 1.45.2
-            build-net-old-tokio: true
-            build-net-tokio: true
-            build-no-std: false
-            build-futures: true
-            build-tx-sync: false
-          - toolchain: 1.47.0
-            build-futures: true
-            build-no-std: true
-            build-tx-sync: false
+          # MacOS-latest requires 1.54.0 because that's what's required for linking to work properly
+          - toolchain: 1.54.0
+            platform: macos-latest
     runs-on: ${{ matrix.platform }}
     steps:
       - name: Checkout source code
@@ -81,162 +32,24 @@ jobs:
           toolchain: ${{ matrix.toolchain }}
           override: true
           profile: minimal
-      - name: Pin tokio to 1.14 for Rust 1.45
-        if: "matrix.build-net-old-tokio"
-        run: cargo update -p tokio --precise "1.14.0" --verbose
-        env:
-          CARGO_NET_GIT_FETCH_WITH_CLI: "true"
-      - name: Pin tokio to 1.26 for Windows
-        if: "matrix.platform == 'windows-latest'"
-        run: cargo update -p tokio --precise "1.26.0" --verbose
-        env:
-          CARGO_NET_GIT_FETCH_WITH_CLI: "true"
-      - name: Build on Rust ${{ matrix.toolchain }} with net-tokio
-        if: "matrix.build-net-tokio && !matrix.coverage"
-        run: cargo build --verbose --color always
-      - name: Build on Rust ${{ matrix.toolchain }} with net-tokio, and full code-linking for coverage generation
-        if: matrix.coverage
-        run: RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always
-      - name: Build on Rust ${{ matrix.toolchain }}
-        if: "! matrix.build-net-tokio"
-        run: |
-          cargo build --verbose  --color always -p lightning
-          cargo build --verbose  --color always -p lightning-invoice
-          cargo build --verbose  --color always -p lightning-persister
-      - name: Build on Rust ${{ matrix.toolchain }} with all Log-Limiting features
-        if: matrix.test-log-variants
-        run: |
-          cd lightning
-          for FEATURE in $(cat Cargo.toml | grep '^max_level_' | awk '{ print $1 }'); do
-            cargo build --verbose --color always --features $FEATURE
-          done
-      - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features
-        if: "matrix.build-net-tokio && !matrix.coverage"
-        run: |
-          cd lightning-block-sync
-          cargo build --verbose --color always --features rest-client
-          cargo build --verbose --color always --features rpc-client
-          cargo build --verbose --color always --features rpc-client,rest-client
-          cargo build --verbose --color always --features rpc-client,rest-client,tokio
-      - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
-        if: matrix.coverage
-        run: |
-          cd lightning-block-sync
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rest-client
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client,tokio
-      - name: Build Transaction Sync Clients on Rust ${{ matrix.toolchain }} with features
-        if: "matrix.build-tx-sync && !matrix.coverage"
-        run: |
-          cd lightning-transaction-sync
-          cargo build --verbose --color always --features esplora-blocking
-          cargo build --verbose --color always --features esplora-async
-          cargo build --verbose --color always --features esplora-async-https
-      - name: Build transaction sync clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
-        if: "matrix.build-tx-sync && matrix.coverage"
+      - name: Install no-std-check dependencies for ARM Embedded
+        if: "matrix.platform == 'ubuntu-latest'"
         run: |
-          cd lightning-transaction-sync
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features esplora-blocking
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features esplora-async
-          RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features esplora-async-https
-      - name: Test transaction sync clients on Rust ${{ matrix.toolchain }} with features
-        if: "matrix.build-tx-sync"
-        run: |
-          cd lightning-transaction-sync
-          cargo test --verbose --color always --features esplora-blocking
-          cargo test --verbose --color always --features esplora-async
-          cargo test --verbose --color always --features esplora-async-https
-      - name: Test backtrace-debug builds on Rust ${{ matrix.toolchain }}
-        if: "matrix.toolchain == 'stable'"
-        shell: bash # Default on Winblows is powershell
-        run: |
-          cd lightning && RUST_BACKTRACE=1 cargo test --verbose --color always --features backtrace
-      - name: Test on Rust ${{ matrix.toolchain }} with net-tokio
-        if: "matrix.build-net-tokio && !matrix.coverage"
-        run: cargo test --verbose --color always
-      - name: Test on Rust ${{ matrix.toolchain }} with net-tokio, and full code-linking for coverage generation
-        if: matrix.coverage
-        run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
-      - name: Test no-std builds on Rust ${{ matrix.toolchain }}
-        if: "matrix.build-no-std && !matrix.coverage"
-        shell: bash # Default on Winblows is powershell
-        run: |
-          for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
-          cd $DIR
-            cargo test --verbose --color always --no-default-features --features no-std
-            # check if there is a conflict between no-std and the default std feature
-            cargo test --verbose --color always --features no-std
-            # check that things still pass without grind_signatures
-            # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
-            cargo test --verbose --color always --no-default-features --features std
-            # check if there is a conflict between no-std and the c_bindings cfg
-            RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
-            cd ..
-          done
-          # check no-std compatibility across dependencies
-          cd no-std-check
-          cargo check --verbose --color always --features lightning-transaction-sync
-      - name: Build no-std-check on Rust ${{ matrix.toolchain }} for ARM Embedded
-        if: "matrix.build-no-std && matrix.platform == 'ubuntu-latest'"
-        run: |
-          cd no-std-check
           rustup target add thumbv7m-none-eabi
           sudo apt-get -y install gcc-arm-none-eabi
-          cargo build --target=thumbv7m-none-eabi
-      - name: Test on no-std builds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
-        if: "matrix.build-no-std && matrix.coverage"
+      - name: shellcheck the CI script
+        if: "matrix.platform == 'ubuntu-latest'"
         run: |
-          cd lightning
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no-std
-      - name: Test futures builds on Rust ${{ matrix.toolchain }}
-        if: "matrix.build-futures && !matrix.coverage"
+          sudo apt-get -y install shellcheck
+          shellcheck ci/ci-tests.sh
+      - name: Run CI script with coverage generation
+        if: matrix.coverage
         shell: bash # Default on Winblows is powershell
-        run: |
-          cd lightning-background-processor
-          cargo test --verbose --color always --no-default-features --features futures
-      - name: Test futures builds on Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
-        if: "matrix.build-futures && matrix.coverage"
+        run: LDK_COVERAGE_BUILD=true ./ci/ci-tests.sh
+      - name: Run CI script
+        if: "!matrix.coverage"
         shell: bash # Default on Winblows is powershell
-        run: |
-          cd lightning-background-processor
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features futures
-      - name: Test on Rust ${{ matrix.toolchain }}
-        if: "! matrix.build-net-tokio"
-        run: |
-          cargo test --verbose --color always -p lightning
-          cargo test --verbose --color always -p lightning-invoice
-          cargo test --verbose --color always -p lightning-rapid-gossip-sync
-          cargo test --verbose --color always -p lightning-persister
-          cargo test --verbose --color always -p lightning-background-processor
-      - name: Test C Bindings Modifications on Rust ${{ matrix.toolchain }}
-        if: "! matrix.build-net-tokio"
-        run: |
-          RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always  -p lightning
-          RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always  -p lightning-invoice
-          RUSTFLAGS="--cfg=c_bindings" cargo build --verbose  --color always -p lightning-persister
-          RUSTFLAGS="--cfg=c_bindings" cargo build --verbose  --color always -p lightning-background-processor
-      - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features
-        if: "matrix.build-net-tokio && !matrix.coverage"
-        run: |
-          cd lightning-block-sync
-          cargo test --verbose --color always --features rest-client
-          cargo test --verbose --color always --features rpc-client
-          cargo test --verbose --color always --features rpc-client,rest-client
-          cargo test --verbose --color always --features rpc-client,rest-client,tokio
-      - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features and full code-linking for coverage generation
-        if: matrix.coverage
-        run: |
-          cd lightning-block-sync
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rest-client
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client,rest-client
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --features rpc-client,rest-client,tokio
-      - name: Test Custom Message Macros on Rust ${{ matrix.toolchain }}
-        if: "matrix.test-custom-message"
-        run: |
-          cd lightning-custom-message
-          cargo test --verbose --color always
+        run: ./ci/ci-tests.sh
       - name: Install deps for kcov
         if: matrix.coverage
         run: |
diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
new file mode 100755 (executable)
index 0000000..7b0beb9
--- /dev/null
@@ -0,0 +1,91 @@
+#!/bin/bash
+set -eox pipefail
+
+RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }')
+HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
+
+# Tokio MSRV on versions newer than 1.14 is rustc 1.49
+[ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.0" --verbose
+[ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
+
+export RUST_BACKTRACE=1
+
+echo -e "\n\nBuilding and testing all workspace crates..."
+cargo build --verbose --color always
+cargo test --verbose --color always
+
+echo -e "\n\nBuilding with all Log-Limiting features"
+pushd lightning
+grep '^max_level_' Cargo.toml | awk '{ print $1 }'| while read -r FEATURE; do
+       cargo build --verbose --color always --features "$FEATURE"
+done
+popd
+
+if [ "$RUSTC_MINOR_VERSION" -gt 51 ]; then # Current `object` MSRV, subject to change
+       echo -e "\n\nTest backtrace-debug builds"
+       pushd lightning
+       cargo test --verbose --color always --features backtrace
+       popd
+fi
+
+echo -e "\n\nTesting no-std flags in various combinations"
+for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
+       pushd $DIR
+       cargo test --verbose --color always --no-default-features --features no-std
+       # check if there is a conflict between no-std and the default std feature
+       cargo test --verbose --color always --features no-std
+       # check that things still pass without grind_signatures
+       # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
+       cargo test --verbose --color always --no-default-features --features std
+       # check if there is a conflict between no-std and the c_bindings cfg
+       RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
+       popd
+done
+
+echo -e "\n\nTesting no-std build on a downstream no-std crate"
+# check no-std compatibility across dependencies
+pushd no-std-check
+cargo check --verbose --color always --features lightning-transaction-sync
+popd
+
+if [ -f "$(which arm-none-eabi-gcc)" ]; then
+       pushd no-std-check
+       cargo build --target=thumbv7m-none-eabi
+       popd
+fi
+
+echo -e "\n\nBuilding and testing Block Sync Clients with features"
+pushd lightning-block-sync
+cargo build --verbose --color always --features rest-client
+cargo test --verbose --color always --features rest-client
+cargo build --verbose --color always --features rpc-client
+cargo test --verbose --color always --features rpc-client
+cargo build --verbose --color always --features rpc-client,rest-client
+cargo test --verbose --color always --features rpc-client,rest-client
+cargo build --verbose --color always --features rpc-client,rest-client,tokio
+cargo test --verbose --color always --features rpc-client,rest-client,tokio
+popd
+
+if [[ $RUSTC_MINOR_VERSION -gt 67 && "$HOST_PLATFORM" != *windows* ]]; then
+       echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
+       pushd lightning-transaction-sync
+       cargo build --verbose --color always --features esplora-blocking
+       cargo test --verbose --color always --features esplora-blocking
+       cargo build --verbose --color always --features esplora-async
+       cargo test --verbose --color always --features esplora-async
+       cargo build --verbose --color always --features esplora-async-https
+       cargo test --verbose --color always --features esplora-async-https
+       popd
+fi
+
+echo -e "\n\nTest futures builds"
+pushd lightning-background-processor
+cargo test --verbose --color always --no-default-features --features futures
+popd
+
+if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
+       echo -e "\n\nTest Custom Message Macros"
+       pushd lightning-custom-message
+       cargo test --verbose --color always
+       popd
+fi
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index ad0da138a85508d5bf38ff8902ab56d93796e066..568dcdf0250057c68e1cf946b4b543f97c8933c0 100644 (file)
@@ -268,6 +268,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                                inbound_htlc_minimum_msat: None,
                                                                inbound_htlc_maximum_msat: None,
                                                                config: None,
+                                                               feerate_sat_per_1000_weight: None,
                                                        });
                                                }
                                                Some(&first_hops_vec[..])
diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml
index 894c3b528df83f500683eb0e1c53a8fa7560fffb..9d42968d866bc8edfe9cf27b87fc3afa7a8907b7 100644 (file)
@@ -14,15 +14,14 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [features]
-rest-client = [ "serde", "serde_json", "chunked_transfer" ]
-rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
+rest-client = [ "serde_json", "chunked_transfer" ]
+rpc-client = [ "serde_json", "chunked_transfer" ]
 
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.114", path = "../lightning" }
 futures-util = { version = "0.3" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
-serde = { version = "1.0", features = ["derive"], optional = true }
 serde_json = { version = "1.0", optional = true }
 chunked_transfer = { version = "1.4", optional = true }
 
diff --git a/lightning-block-sync/src/convert.rs b/lightning-block-sync/src/convert.rs
index ed28833b7b30d4986fe53e2801d2ddaf43dcf765..d6294e1d2a79518c05a674cae5ea86d41fc1faf7 100644 (file)
@@ -5,11 +5,9 @@ use crate::{BlockHeaderData, BlockSourceError};
 use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::consensus::encode;
 use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid};
-use bitcoin::hashes::hex::{FromHex, ToHex};
+use bitcoin::hashes::hex::FromHex;
 use bitcoin::Transaction;
 
-use serde::Deserialize;
-
 use serde_json;
 
 use std::convert::From;
@@ -46,7 +44,7 @@ impl TryInto<BlockHeaderData> for JsonResponse {
        type Error = std::io::Error;
 
        fn try_into(self) -> std::io::Result<BlockHeaderData> {
-               let mut header = match self.0 {
+               let header = match self.0 {
                        serde_json::Value::Array(mut array) if !array.is_empty() => array.drain(..).next().unwrap(),
                        serde_json::Value::Object(_) => self.0,
                        _ => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "unexpected JSON type")),
@@ -57,51 +55,34 @@ impl TryInto<BlockHeaderData> for JsonResponse {
                }
 
                // Add an empty previousblockhash for the genesis block.
-               if let None = header.get("previousblockhash") {
-                       let hash: BlockHash = BlockHash::all_zeros();
-                       header.as_object_mut().unwrap().insert("previousblockhash".to_string(), serde_json::json!(hash.to_hex()));
-               }
-
-               match serde_json::from_value::<GetHeaderResponse>(header) {
-                       Err(_) => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid header response")),
-                       Ok(response) => match response.try_into() {
-                               Err(_) => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid header data")),
-                               Ok(header) => Ok(header),
-                       },
+               match header.try_into() {
+                       Err(_) => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid header data")),
+                       Ok(header) => Ok(header),
                }
        }
 }
 
-/// Response data from `getblockheader` RPC and `headers` REST requests.
-#[derive(Deserialize)]
-struct GetHeaderResponse {
-       pub version: i32,
-       pub merkleroot: String,
-       pub time: u32,
-       pub nonce: u32,
-       pub bits: String,
-       pub previousblockhash: String,
-
-       pub chainwork: String,
-       pub height: u32,
-}
+impl TryFrom<serde_json::Value> for BlockHeaderData {
+       type Error = ();
 
-/// Converts from `GetHeaderResponse` to `BlockHeaderData`.
-impl TryFrom<GetHeaderResponse> for BlockHeaderData {
-       type Error = bitcoin::hashes::hex::Error;
+       fn try_from(response: serde_json::Value) -> Result<Self, ()> {
+               macro_rules! get_field { ($name: expr, $ty_access: tt) => {
+                       response.get($name).ok_or(())?.$ty_access().ok_or(())?
+               } }
 
-       fn try_from(response: GetHeaderResponse) -> Result<Self, bitcoin::hashes::hex::Error> {
                Ok(BlockHeaderData {
                        header: BlockHeader {
-                               version: response.version,
-                               prev_blockhash: BlockHash::from_hex(&response.previousblockhash)?,
-                               merkle_root: TxMerkleNode::from_hex(&response.merkleroot)?,
-                               time: response.time,
-                               bits: u32::from_be_bytes(<[u8; 4]>::from_hex(&response.bits)?),
-                               nonce: response.nonce,
+                               version: get_field!("version", as_i64).try_into().map_err(|_| ())?,
+                               prev_blockhash: if let Some(hash_str) = response.get("previousblockhash") {
+                                               BlockHash::from_hex(hash_str.as_str().ok_or(())?).map_err(|_| ())?
+                                       } else { BlockHash::all_zeros() },
+                               merkle_root: TxMerkleNode::from_hex(get_field!("merkleroot", as_str)).map_err(|_| ())?,
+                               time: get_field!("time", as_u64).try_into().map_err(|_| ())?,
+                               bits: u32::from_be_bytes(<[u8; 4]>::from_hex(get_field!("bits", as_str)).map_err(|_| ())?),
+                               nonce: get_field!("nonce", as_u64).try_into().map_err(|_| ())?,
                        },
-                       chainwork: hex_to_uint256(&response.chainwork)?,
-                       height: response.height,
+                       chainwork: hex_to_uint256(get_field!("chainwork", as_str)).map_err(|_| ())?,
+                       height: get_field!("height", as_u64).try_into().map_err(|_| ())?,
                })
        }
 }
@@ -250,6 +231,7 @@ pub(crate) mod tests {
        use super::*;
        use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::hashes::Hash;
+       use bitcoin::hashes::hex::ToHex;
        use bitcoin::network::constants::Network;
        use serde_json::value::Number;
        use serde_json::Value;
@@ -308,7 +290,7 @@ pub(crate) mod tests {
                match TryInto::<BlockHeaderData>::try_into(response) {
                        Err(e) => {
                                assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid header response");
+                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid header data");
                        },
                        Ok(_) => panic!("Expected error"),
                }
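
The serde-derived GetHeaderResponse struct is gone: BlockHeaderData is now built directly from the serde_json::Value via the get_field! macro, with a missing "previousblockhash" falling back to the all-zero hash for the genesis block. A minimal sketch of a test exercising the new conversion, assuming it sits as a child module of convert.rs (so `use super::*` resolves BlockHeaderData); the field values are dummies, not real chain data:

    #[cfg(test)]
    mod header_conversion_sketch {
        use super::*;
        use serde_json::json;
        use std::convert::TryInto;

        #[test]
        fn builds_header_data_from_json() {
            let header_json = json!({
                "version": 1,
                "merkleroot": "0".repeat(64), // 64 hex chars stand in for a real merkle root
                "time": 1_231_006_505u64,
                "nonce": 2_083_236_893u64,
                "bits": "1d00ffff",
                "chainwork": "0".repeat(64), // 64 hex chars stand in for real chainwork
                "height": 0u64
                // "previousblockhash" omitted: the impl falls back to BlockHash::all_zeros()
            });
            let header: BlockHeaderData = header_json.try_into().expect("all required fields present");
            assert_eq!(header.height, 0);
        }
    }
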
diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs
index 2a328f7a13324b46aa7a214ebb02a78ef035020e..06f11a91e240ae33207c9b4aa4e39ccac4183c41 100644 (file)
@@ -512,8 +512,10 @@ fn _create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_has
 /// * Always select the channel with the highest inbound capacity per counterparty node
 /// * Prefer channels with capacity at least `min_inbound_capacity_msat` and where the channel
 ///   `is_usable` (i.e. the peer is connected).
-/// * If any public channel exists, the returned `RouteHint`s will be empty, and the sender will
-///   need to find the path by looking at the public channels instead
+/// * If any public channel exists, only public [`RouteHint`]s will be returned.
+/// * If any public, announced, channel exists (i.e. a channel with 7+ confs, to ensure the
+///   announcement has had a chance to propagate), no [`RouteHint`]s will be returned, as the
+///   sender is expected to find the path by looking at the public channels instead.
 fn filter_channels<L: Deref>(
        channels: Vec<ChannelDetails>, min_inbound_capacity_msat: Option<u64>, logger: &L
 ) -> Vec<RouteHint> where L::Target: Logger {
@@ -522,6 +524,7 @@ fn filter_channels<L: Deref>(
        let mut min_capacity_channel_exists = false;
        let mut online_channel_exists = false;
        let mut online_min_capacity_channel_exists = false;
+       let mut has_pub_unconf_chan = false;
 
        log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
        for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
@@ -531,11 +534,18 @@ fn filter_channels<L: Deref>(
                }
 
                if channel.is_public {
-                       // If any public channel exists, return no hints and let the sender
-                       // look at the public channels instead.
-                       log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
-                               log_bytes!(channel.channel_id));
-                       return vec![]
+                       if channel.confirmations.is_some() && channel.confirmations < Some(7) {
+                               // If we have a public channel, but it doesn't have enough confirmations to (yet)
+                               // be in the public network graph (and have gotten a chance to propagate), include
+                               // route hints but only for public channels to protect private channel privacy.
+                               has_pub_unconf_chan = true;
+                       } else {
+                               // If any public channel exists, return no hints and let the sender
+                               // look at the public channels instead.
+                               log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
+                                       log_bytes!(channel.channel_id));
+                               return vec![]
+                       }
                }
 
                if channel.inbound_capacity_msat >= min_inbound_capacity {
@@ -557,20 +567,32 @@ fn filter_channels<L: Deref>(
                match filtered_channels.entry(channel.counterparty.node_id) {
                        hash_map::Entry::Occupied(mut entry) => {
                                let current_max_capacity = entry.get().inbound_capacity_msat;
-                               if channel.inbound_capacity_msat < current_max_capacity {
+                               // If this channel is public and the previous channel is not, ensure we replace the
+                               // previous channel to avoid announcing non-public channels.
+                               let new_now_public = channel.is_public && !entry.get().is_public;
+                               // If the public-ness of the channel has not changed (in which case simply defer to
+                               // `new_now_public`), and this channel has a greater capacity, prefer to announce
+                               // this channel.
+                               let new_higher_capacity = channel.is_public == entry.get().is_public &&
+                                       channel.inbound_capacity_msat > current_max_capacity;
+                               if new_now_public || new_higher_capacity {
+                                       log_trace!(logger,
+                                               "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
+                                               log_pubkey!(channel.counterparty.node_id),
+                                               log_bytes!(channel.channel_id), channel.short_channel_id,
+                                               channel.inbound_capacity_msat,
+                                               log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+                                               current_max_capacity);
+                                       entry.insert(channel);
+                               } else {
                                        log_trace!(logger,
-                                               "Preferring counterparty {} channel {} ({} msats) over {} ({} msats) for invoice route hints",
+                                               "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
                                                log_pubkey!(channel.counterparty.node_id),
-                                               log_bytes!(entry.get().channel_id), current_max_capacity,
-                                               log_bytes!(channel.channel_id), channel.inbound_capacity_msat);
-                                       continue;
+                                               log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+                                               current_max_capacity,
+                                               log_bytes!(channel.channel_id), channel.short_channel_id,
+                                               channel.inbound_capacity_msat);
                                }
-                               log_trace!(logger,
-                                       "Preferring counterparty {} channel {} ({} msats) over {} ({} msats) for invoice route hints",
-                                       log_pubkey!(channel.counterparty.node_id),
-                                       log_bytes!(channel.channel_id), channel.inbound_capacity_msat,
-                                       log_bytes!(entry.get().channel_id), current_max_capacity);
-                               entry.insert(channel);
                        }
                        hash_map::Entry::Vacant(entry) => {
                                entry.insert(channel);
@@ -600,7 +622,12 @@ fn filter_channels<L: Deref>(
                .map(|(_, channel)| channel)
                .filter(|channel| {
                        let has_enough_capacity = channel.inbound_capacity_msat >= min_inbound_capacity;
-                       let include_channel = if online_min_capacity_channel_exists {
+                       let include_channel = if has_pub_unconf_chan {
+                               // If we have a public channel, but it doesn't have enough confirmations to (yet)
+                               // be in the public network graph (and have gotten a chance to propagate), include
+                               // route hints but only for public channels to protect private channel privacy.
+                               channel.is_public
+                       } else if online_min_capacity_channel_exists {
                                has_enough_capacity && channel.is_usable
                        } else if min_capacity_channel_exists && online_channel_exists {
                                // If there are some online channels and some min_capacity channels, but no
@@ -620,7 +647,7 @@ fn filter_channels<L: Deref>(
                                log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints",
                                        log_bytes!(channel.channel_id));
                        } else {
-                               debug_assert!(!channel.is_usable);
+                               debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public));
                                log_trace!(logger, "Ignoring channel {} with disconnected peer",
                                        log_bytes!(channel.channel_id));
                        }
@@ -789,6 +816,63 @@ mod test {
                assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&payment_hash.0[..]).unwrap());
        }
 
+       #[test]
+       fn test_hints_has_only_public_confd_channels() {
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let mut config = test_default_channel_config();
+               config.channel_handshake_config.minimum_depth = 1;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Create a private channel with lots of capacity and a lower value public channel (without
+               // confirming the funding tx yet).
+               let unannounced_scid = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0);
+               let conf_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 10_000, 0);
+
+               // Before the channel is available, we should include the unannounced_scid.
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(unannounced_scid.0.short_channel_id_alias.unwrap());
+               match_invoice_routes(Some(5000), &nodes[1], scid_aliases.clone());
+
+               // However after we mine the funding tx and exchange channel_ready messages for the public
+               // channel we'll immediately switch to including it as a route hint, even though it isn't
+               // yet announced.
+               let pub_channel_scid = mine_transaction(&nodes[0], &conf_tx);
+               let node_a_pub_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &node_a_pub_channel_ready);
+
+               assert_eq!(mine_transaction(&nodes[1], &conf_tx), pub_channel_scid);
+               let events = nodes[1].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 2);
+               if let MessageSendEvent::SendChannelReady { msg, .. } = &events[0] {
+                       nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), msg);
+               } else { panic!(); }
+               if let MessageSendEvent::SendChannelUpdate { msg, .. } = &events[1] {
+                       nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), msg);
+               } else { panic!(); }
+
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()));
+
+               expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
+               expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+               scid_aliases.clear();
+               scid_aliases.insert(node_a_pub_channel_ready.short_channel_id_alias.unwrap());
+               match_invoice_routes(Some(5000), &nodes[1], scid_aliases.clone());
+               // This also applies even if the amount is more than the payment amount, to ensure users
+               // don't screw up their privacy.
+               match_invoice_routes(Some(50_000_000), &nodes[1], scid_aliases.clone());
+
+               // The same remains true until the channel has 7 confirmations, at which point we include
+               // no hints.
+               connect_blocks(&nodes[1], 5);
+               match_invoice_routes(Some(5000), &nodes[1], scid_aliases.clone());
+               connect_blocks(&nodes[1], 1);
+               get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, nodes[0].node.get_our_node_id());
+               match_invoice_routes(Some(5000), &nodes[1], HashSet::new());
+       }
+
        #[test]
        fn test_hints_includes_single_channels_to_nodes() {
                let chanmon_cfgs = create_chanmon_cfgs(3);
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index cd18ffad12d385747da26ee6e7222f9dc457b5f3..1ebb65534b3f3d34af093e0c11260aa723bcc2ce 100644 (file)
@@ -4012,7 +4012,7 @@ mod tests {
        use crate::ln::functional_test_utils::*;
        use crate::ln::script::ShutdownScript;
        use crate::util::errors::APIError;
-       use crate::util::events::{ClosureReason, MessageSendEventsProvider};
+       use crate::util::events::ClosureReason;
        use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
        use crate::util::ser::{ReadableArgs, Writeable};
        use crate::sync::{Arc, Mutex};
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 18e461f61a3c0da0ca74c8579542e7571bc39bf2..5a39ef18cd6aac839a33b02ab29c1896e7e72a91 100644 (file)
@@ -149,7 +149,7 @@ fn test_monitor_and_persister_update_fail() {
                        // because the update is bogus, ultimately the error that's returned
                        // should be a PermanentFailure.
                        if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-                       logger.assert_log_regex("lightning::chain::chainmonitor".to_string(), regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+                       logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
                        assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
        }
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 1a1409c654d0ac08d1f8464abb44c6a838895171..7a389b573e6862d2ead48613e95d8356b0f7dc1c 100644 (file)
@@ -4777,7 +4777,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        })
        }
 
-       pub fn get_feerate(&self) -> u32 {
+       pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
                self.feerate_per_kw
        }
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 89cb317dc6c6705effe175a54e98d437727b9c92..90afc0169f6b1c0a3246b48d40c8a20145427964 100644 (file)
@@ -1120,6 +1120,11 @@ pub struct ChannelDetails {
        /// inbound. This may be zero for inbound channels serialized with LDK versions prior to
        /// 0.0.113.
        pub user_channel_id: u128,
+       /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
+       /// which is applied to commitment and HTLC transactions.
+       ///
+       /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
+       pub feerate_sat_per_1000_weight: Option<u32>,
        /// Our total balance.  This is the amount we would get if we close the channel.
        /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
        /// amount is not likely to be recoverable on close.
@@ -1262,6 +1267,7 @@ impl ChannelDetails {
                        outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
                        inbound_scid_alias: channel.latest_inbound_scid_alias(),
                        channel_value_satoshis: channel.get_value_satoshis(),
+                       feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
                        unspendable_punishment_reserve: to_self_reserve_satoshis,
                        balance_msat: balance.balance_msat,
                        inbound_capacity_msat: balance.inbound_capacity_msat,
@@ -3515,18 +3521,18 @@ where
        fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
                if !chan.is_outbound() { return NotifyOption::SkipPersist; }
                // If the feerate has decreased by less than half, don't bother
-               if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+               if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-                               log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                               log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersist;
                }
                if !chan.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
-                               log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                               log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersist;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
-                       log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+                       log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
 
                chan.queue_update_fee(new_feerate, &self.logger);
                NotifyOption::DoPersist
@@ -6588,6 +6594,7 @@ impl Writeable for ChannelDetails {
                        (33, self.inbound_htlc_minimum_msat, option),
                        (35, self.inbound_htlc_maximum_msat, option),
                        (37, user_channel_id_high_opt, option),
+                       (39, self.feerate_sat_per_1000_weight, option),
                });
                Ok(())
        }
@@ -6623,6 +6630,7 @@ impl Readable for ChannelDetails {
                        (33, inbound_htlc_minimum_msat, option),
                        (35, inbound_htlc_maximum_msat, option),
                        (37, user_channel_id_high_opt, option),
+                       (39, feerate_sat_per_1000_weight, option),
                });
 
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -6656,6 +6664,7 @@ impl Readable for ChannelDetails {
                        is_public: is_public.0.unwrap(),
                        inbound_htlc_minimum_msat,
                        inbound_htlc_maximum_msat,
+                       feerate_sat_per_1000_weight,
                })
        }
 }
@@ -8212,7 +8221,7 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
-               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
        }
 
        #[test]
@@ -8255,7 +8264,7 @@ mod tests {
                assert!(updates.update_fee.is_none());
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
-               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1);
        }
 
        #[test]
@@ -8283,7 +8292,8 @@ mod tests {
 
                match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() {
                        PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
-                               assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))                        },
+                               assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
+                       },
                        _ => panic!("unexpected error")
                }
        }
@@ -8343,7 +8353,7 @@ mod tests {
                match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
                        Ok(_) => panic!("Unexpected ok"),
                        Err(()) => {
-                               nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
+                               nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
                        }
                }
 
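
Because the new ChannelDetails field is written as an odd TLV (type 39), older serialized ChannelDetails still deserialize fine and simply report None. A minimal usage sketch under that assumption; the helper function is illustrative and not part of the change:

    use lightning::ln::channelmanager::ChannelDetails;

    /// Print the commitment feerate of each channel, e.g. as returned by ChannelManager::list_channels().
    fn log_channel_feerates(channels: &[ChannelDetails]) {
        for details in channels {
            match details.feerate_sat_per_1000_weight {
                Some(feerate) =>
                    println!("channel {:02x?}: {} sat per 1000 weight units", details.channel_id, feerate),
                // None only occurs for ChannelDetails serialized by LDK versions prior to 0.0.115.
                None =>
                    println!("channel {:02x?}: feerate unknown (pre-0.0.115 serialization)", details.channel_id),
            }
        }
    }
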
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index c8d3beb3cba8e3d899e96fc2717c8d4e7893660d..f48c9d099e7e3eb3b03d4cf5f92cfca1d37f4b4b 100644 (file)
@@ -63,9 +63,12 @@ pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Tran
        scid
 }
 /// Mine a single block containing the given transaction
-pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+///
+/// Returns the SCID a channel confirmed in the given transaction will have, assuming the funding
+/// output is the 1st output in the transaction.
+pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> u64 {
        let height = node.best_block_info().1 + 1;
-       confirm_transaction_at(node, tx, height);
+       confirm_transaction_at(node, tx, height)
 }
 /// Mine a single block containing the given transactions
 pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Transaction]) {
@@ -697,7 +700,7 @@ macro_rules! get_feerate {
                        let mut per_peer_state_lock;
                        let mut peer_state_lock;
                        let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-                       chan.get_feerate()
+                       chan.get_feerate_sat_per_1000_weight()
                }
        }
 }
@@ -1098,6 +1101,10 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &
        nodes[a].node.handle_funding_signed(&nodes[b].node.get_our_node_id(), &cs_funding_signed);
        check_added_monitors!(nodes[a], 1);
 
+       assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
+       nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+
        let conf_height = core::cmp::max(nodes[a].best_block_info().1 + 1, nodes[b].best_block_info().1 + 1);
        confirm_transaction_at(&nodes[a], &tx, conf_height);
        connect_blocks(&nodes[a], CHAN_CONFIRM_DEPTH - 1);
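
With mine_transaction now returning the confirmed channel's SCID (assuming the funding output is output 0), the new lightning-invoice test can mine the same funding transaction on both nodes and check that they agree on it. A sketch of that shape inside a functional test, assuming `nodes` and a broadcast `funding_tx` are already set up:

    // Both nodes confirm the same funding transaction and should derive the same SCID.
    let scid_a = mine_transaction(&nodes[0], &funding_tx);
    let scid_b = mine_transaction(&nodes[1], &funding_tx);
    assert_eq!(scid_a, scid_b);
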
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 4fe5d041028c4a0fefbf8c4ea3dc6e69bda2f88b..9a6870929334608f963179053bb35c0a0d5f133d 100644 (file)
@@ -94,7 +94,7 @@ fn test_insane_channel_opens() {
                if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
                        match action {
                                &ErrorAction::SendErrorMessage { .. } => {
-                                       nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
+                                       nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
                                },
                                _ => panic!("unexpected event!"),
                        }
@@ -1118,7 +1118,7 @@ fn holding_cell_htlc_counting() {
                unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)), true, APIError::ChannelUnavailable { ref err },
                        assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
        }
 
        // This should also be true if we try to forward a payment.
@@ -1346,7 +1346,7 @@ fn test_basic_channel_reserve() {
                _ => panic!("Unexpected error variant"),
        }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1);
 
        send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
 }
@@ -1811,7 +1811,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
                unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
                        assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
        }
 
        // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
@@ -1906,7 +1906,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
                unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
                        assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 2);
+               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2);
        }
 
        let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
@@ -5987,7 +5987,7 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
        unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
                assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1);
 }
 
 #[test]
@@ -6005,7 +6005,7 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
                assert_eq!(err, "Cannot send 0-msat HTLC"));
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
 }
 
 #[test]
@@ -6088,7 +6088,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
                assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
 }
 
 #[test]
@@ -6112,7 +6112,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
                assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
 
        send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
 }
@@ -9502,7 +9502,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                }
                nodes[0].node.timer_tick_occurred();
                check_added_monitors!(nodes[0], 1);
-               nodes[0].logger.assert_log_contains("lightning::ln::channel".to_string(), "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure".to_string(), 1);
+               nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
        }
 
        let _ = nodes[0].node.get_and_clear_pending_msg_events();
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index 30d6eccd481e66fd9fd80377b1a900b3ed970160..330eee8be06b1ac3b0ea5aca021b04486fd03787 100644 (file)
@@ -423,7 +423,7 @@ fn test_inbound_scid_privacy() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true);
 
-       nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1);
+       nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", regex::Regex::new(r"Refusing to forward over real channel SCID as our counterparty requested").unwrap(), 1);
 
        let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs
index aee7f562defc5915517d696cf032353deee4e3d6..89582305c2107baabcce522937ab13f793b0efbc 100644 (file)
@@ -20,6 +20,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::{PublicKey, Secp256k1};
 
 use crate::io;
+use crate::io_extras::read_to_end;
 use crate::sync::Arc;
 
 struct MessengerNode {
@@ -59,8 +60,7 @@ impl CustomOnionMessageHandler for TestCustomMessageHandler {
        fn handle_custom_message(&self, _msg: Self::CustomMessage) {}
        fn read_custom_message<R: io::Read>(&self, message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, DecodeError> where Self: Sized {
                if message_type == CUSTOM_MESSAGE_TYPE {
-                       let mut buf = Vec::new();
-                       buffer.read_to_end(&mut buf)?;
+                       let buf = read_to_end(buffer)?;
                        assert_eq!(buf, CUSTOM_MESSAGE_CONTENTS);
                        return Ok(Some(TestCustomMessage {}))
                }
@@ -104,8 +104,8 @@ fn pass_along_path(path: &Vec<MessengerNode>, expected_path_id: Option<[u8; 32]>
                node.messenger.handle_onion_message(&prev_node.get_node_pk(), &onion_msg);
                if idx == num_nodes - 1 {
                        node.logger.assert_log_contains(
-                               "lightning::onion_message::messenger".to_string(),
-                               format!("Received an onion message with path_id: {:02x?}", expected_path_id).to_string(), 1);
+                               "lightning::onion_message::messenger",
+                               &format!("Received an onion message with path_id: {:02x?}", expected_path_id), 1);
                }
                prev_node = node;
        }
@@ -218,8 +218,8 @@ fn reply_path() {
        pass_along_path(&nodes, None);
        // Make sure the last node successfully decoded the reply path.
        nodes[3].logger.assert_log_contains(
-               "lightning::onion_message::messenger".to_string(),
-               format!("Received an onion message with path_id None and a reply_path").to_string(), 1);
+               "lightning::onion_message::messenger",
+               &format!("Received an onion message with path_id None and a reply_path"), 1);
 
        // Destination::BlindedPath
        let blinded_path = BlindedPath::new(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
@@ -228,8 +228,8 @@ fn reply_path() {
        nodes[0].messenger.send_onion_message(&[], Destination::BlindedPath(blinded_path), OnionMessageContents::Custom(test_msg), Some(reply_path)).unwrap();
        pass_along_path(&nodes, None);
        nodes[3].logger.assert_log_contains(
-               "lightning::onion_message::messenger".to_string(),
-               format!("Received an onion message with path_id None and a reply_path").to_string(), 2);
+               "lightning::onion_message::messenger",
+               &format!("Received an onion message with path_id None and a reply_path"), 2);
 }
 
 #[test]
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 9682503ee45c22d5062a099f5c1af015bbe3ffc0..baa2ecce1b7f3dae5037d61b84b67cbcc0723a20 100644 (file)
@@ -2208,6 +2208,7 @@ mod tests {
                        inbound_htlc_minimum_msat: None,
                        inbound_htlc_maximum_msat: None,
                        config: None,
+                       feerate_sat_per_1000_weight: None
                }
        }
 
@@ -5737,6 +5738,7 @@ mod benches {
                        inbound_htlc_minimum_msat: None,
                        inbound_htlc_maximum_msat: None,
                        config: None,
+                       feerate_sat_per_1000_weight: None,
                }
        }
 
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 344f5b4c62d01736afd85654064735c68f0456bd..ddc09f0b37d8d3a462755cc78e5a32a5bbb74ac7 100644 (file)
@@ -642,10 +642,10 @@ impl TestLogger {
        /// 1. belongs to the specified module and
        /// 2. contains `line` in it.
        /// And asserts if the number of occurrences is the same with the given `count`
-       pub fn assert_log_contains(&self, module: String, line: String, count: usize) {
+       pub fn assert_log_contains(&self, module: &str, line: &str, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-                       m == &module && l.contains(line.as_str())
+                       m == module && l.contains(line)
                }).map(|(_, c) | { c }).sum();
                assert_eq!(l, count)
        }
@@ -654,10 +654,10 @@ impl TestLogger {
        /// 1. belong to the specified module and
        /// 2. match the given regex pattern.
        /// Assert that the number of occurrences equals the given `count`
-       pub fn assert_log_regex(&self, module: String, pattern: regex::Regex, count: usize) {
+       pub fn assert_log_regex(&self, module: &str, pattern: regex::Regex, count: usize) {
                let log_entries = self.lines.lock().unwrap();
                let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-                       m == &module && pattern.is_match(&l)
+                       m == module && pattern.is_match(&l)
                }).map(|(_, c) | { c }).sum();
                assert_eq!(l, count)
        }
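
With assert_log_contains and assert_log_regex taking &str, the many call sites updated in this commit can pass string literals directly instead of allocating owned Strings, e.g. with `logger` being a node's TestLogger:

    // Before: owned Strings were required at every call site.
    //     logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
    // After: string literals are borrowed directly.
    logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1);
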