Merge pull request #2609 from wpaulino/monitor-get-spendable-output
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 29 Sep 2023 01:29:47 +0000 (01:29 +0000)
committer GitHub <noreply@github.com>
Fri, 29 Sep 2023 01:29:47 +0000 (01:29 +0000)
Allow retrieval of SpendableOutputDescriptors from relevant transactions
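For orientation, the merge title and branch name suggest the user-facing addition is a `ChannelMonitor` method for re-deriving `SpendableOutputDescriptor`s from confirmed transactions; the monitor-side hunks are not among the diffs shown below. A minimal usage sketch, assuming a method `ChannelMonitor::get_spendable_outputs(tx, confirmation_height)` with this shape:

use bitcoin::Transaction;
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::sign::{SpendableOutputDescriptor, WriteableEcdsaChannelSigner};

// Re-derive descriptors for any outputs the monitor knows how to spend, e.g.
// after a channel close. The method name and signature are assumptions
// inferred from the merge title.
fn rescan_spendable_outputs<S: WriteableEcdsaChannelSigner>(
	monitor: &ChannelMonitor<S>, confirmed_txs: &[(Transaction, u32)],
) -> Vec<SpendableOutputDescriptor> {
	let mut descriptors = Vec::new();
	for (tx, height) in confirmed_txs {
		descriptors.extend(monitor.get_spendable_outputs(tx, *height));
	}
	descriptors
}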

31 files changed:
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-custom-message/Cargo.toml
lightning-invoice/Cargo.toml
lightning-invoice/src/lib.rs
lightning-invoice/src/time_utils.rs [deleted symlink]
lightning-net-tokio/Cargo.toml
lightning-persister/Cargo.toml
lightning-persister/src/fs_store.rs
lightning-persister/src/test_utils.rs
lightning-persister/src/utils.rs
lightning-rapid-gossip-sync/Cargo.toml
lightning-transaction-sync/Cargo.toml
lightning/Cargo.toml
lightning/src/chain/package.rs
lightning/src/events/mod.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/reload_tests.rs
lightning/src/routing/router.rs
lightning/src/sign/mod.rs
lightning/src/util/persist.rs
lightning/src/util/test_utils.rs
pending_changelog/kvstore.txt
pending_changelog/monitorupdatingpersister.txt [new file with mode: 0644]

diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml
index 26de46cb8a884e0005a9accae2d4619ff4ee6b7c..9ae4e30d1025673c250f20c09f6bf5ac1d609c4f 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-background-processor"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Valentine Wallace <vwallace@protonmail.com>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -22,11 +22,11 @@ default = ["std"]
 
 [dependencies]
 bitcoin = { version = "0.29.0", default-features = false }
-lightning = { version = "0.0.117-alpha1", path = "../lightning", default-features = false }
-lightning-rapid-gossip-sync = { version = "0.0.117-alpha1", path = "../lightning-rapid-gossip-sync", default-features = false }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", default-features = false }
+lightning-rapid-gossip-sync = { version = "0.0.117-alpha2", path = "../lightning-rapid-gossip-sync", default-features = false }
 
 [dev-dependencies]
 tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
-lightning = { version = "0.0.117-alpha1", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.25.0-alpha1", path = "../lightning-invoice" }
-lightning-persister = { version = "0.0.117-alpha1", path = "../lightning-persister" }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", features = ["_test_utils"] }
+lightning-invoice = { version = "0.25.0-alpha2", path = "../lightning-invoice" }
+lightning-persister = { version = "0.0.117-alpha2", path = "../lightning-persister" }
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 7ae14b4b4aabd5e92c3a610b5cfd622c2314264f..efa1a42142c7c428405f9211019680e997aeaa57 100644 (file)
@@ -506,10 +506,10 @@ use core::task;
 /// # use lightning_background_processor::{process_events_async, GossipSync};
 /// # struct MyStore {}
 /// # impl lightning::util::persist::KVStore for MyStore {
-/// #     fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
-/// #     fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
-/// #     fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
-/// #     fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
+/// #     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
+/// #     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
+/// #     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
+/// #     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
 /// # }
 /// # struct MyEventHandler {}
 /// # impl MyEventHandler {
@@ -868,7 +868,10 @@ mod tests {
        use lightning::util::config::UserConfig;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
-       use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_SUB_NAMESPACE, SCORER_PERSISTENCE_KEY};
+       use lightning::util::persist::{KVStore,
+               CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
+               NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
+               SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
        use lightning_persister::fs_store::FilesystemStore;
        use std::collections::VecDeque;
        use std::{fs, env};
@@ -983,13 +986,13 @@ mod tests {
        }
 
        impl KVStore for Persister {
-               fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
-                       self.kv_store.read(namespace, sub_namespace, key)
+               fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
+                       self.kv_store.read(primary_namespace, secondary_namespace, key)
                }
 
-               fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
-                       if namespace == CHANNEL_MANAGER_PERSISTENCE_NAMESPACE &&
-                               sub_namespace == CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE &&
+               fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
+                       if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
+                               secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
                                key == CHANNEL_MANAGER_PERSISTENCE_KEY
                        {
                                if let Some((error, message)) = self.manager_error {
@@ -997,8 +1000,8 @@ mod tests {
                                }
                        }
 
-                       if namespace == NETWORK_GRAPH_PERSISTENCE_NAMESPACE &&
-                               sub_namespace == NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE &&
+                       if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
+                               secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
                                key == NETWORK_GRAPH_PERSISTENCE_KEY
                        {
                                if let Some(sender) = &self.graph_persistence_notifier {
@@ -1013,8 +1016,8 @@ mod tests {
                                }
                        }
 
-                       if namespace == SCORER_PERSISTENCE_NAMESPACE &&
-                               sub_namespace == SCORER_PERSISTENCE_SUB_NAMESPACE &&
+                       if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
+                               secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
                                key == SCORER_PERSISTENCE_KEY
                        {
                                if let Some((error, message)) = self.scorer_error {
@@ -1022,15 +1025,15 @@ mod tests {
                                }
                        }
 
-                       self.kv_store.write(namespace, sub_namespace, key, buf)
+                       self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
                }
 
-               fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
-                       self.kv_store.remove(namespace, sub_namespace, key, lazy)
+               fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
+                       self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
                }
 
-               fn list(&self, namespace: &str, sub_namespace: &str) -> lightning::io::Result<Vec<String>> {
-                       self.kv_store.list(namespace, sub_namespace)
+               fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
+                       self.kv_store.list(primary_namespace, secondary_namespace)
                }
        }
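The rename from `namespace`/`sub_namespace` to `primary_namespace`/`secondary_namespace` applies to all four `KVStore` methods, as above. For context, a minimal in-memory implementation against the renamed trait might look like the following (an illustrative sketch assuming the `std` feature, not part of this change):

use lightning::io;
use lightning::util::persist::KVStore;
use std::collections::HashMap;
use std::sync::Mutex;

// Keys the map by (primary_namespace, secondary_namespace, key).
struct MemoryStore {
	map: Mutex<HashMap<(String, String, String), Vec<u8>>>,
}

impl KVStore for MemoryStore {
	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
		self.map.lock().unwrap()
			.get(&(primary_namespace.to_owned(), secondary_namespace.to_owned(), key.to_owned()))
			.cloned()
			.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
	}
	fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
		self.map.lock().unwrap()
			.insert((primary_namespace.to_owned(), secondary_namespace.to_owned(), key.to_owned()), buf.to_vec());
		Ok(())
	}
	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
		self.map.lock().unwrap()
			.remove(&(primary_namespace.to_owned(), secondary_namespace.to_owned(), key.to_owned()));
		Ok(())
	}
	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
		Ok(self.map.lock().unwrap().keys()
			.filter(|(p, s, _)| p.as_str() == primary_namespace && s.as_str() == secondary_namespace)
			.map(|(_, _, k)| k.clone())
			.collect())
	}
}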
 
diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml
index 79cf2c9b7dbfb02b60719b119685f3eb1e81f875..90dd86eb3542654905b095d2d9c58ee30692c50d 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-block-sync"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Jeffrey Czyz", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -19,11 +19,11 @@ rpc-client = [ "serde_json", "chunked_transfer" ]
 
 [dependencies]
 bitcoin = "0.29.0"
-lightning = { version = "0.0.117-alpha1", path = "../lightning" }
+lightning = { version = "0.0.117-alpha2", path = "../lightning" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
 serde_json = { version = "1.0", optional = true }
 chunked_transfer = { version = "1.4", optional = true }
 
 [dev-dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", features = ["_test_utils"] }
 tokio = { version = "1.14", features = [ "macros", "rt" ] }
diff --git a/lightning-custom-message/Cargo.toml b/lightning-custom-message/Cargo.toml
index 8fc7034f9669192a3e11d5ebe1f6f30cf85b31a9..c75230af1a5e7829bfec45fbd940ff24c41a533b 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-custom-message"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Jeffrey Czyz"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -15,4 +15,4 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.29.0"
-lightning = { version = "0.0.117-alpha1", path = "../lightning" }
+lightning = { version = "0.0.117-alpha2", path = "../lightning" }
diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml
index 6774920cb92fcfb282b884939e68d0163a8b734f..2e599800d24d4138bd87122e45959e0a07d91140 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "lightning-invoice"
 description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.25.0-alpha1"
+version = "0.25.0-alpha2"
 authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
 documentation = "https://docs.rs/lightning-invoice/"
 license = "MIT OR Apache-2.0"
@@ -21,7 +21,7 @@ std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"]
 
 [dependencies]
 bech32 = { version = "0.9.0", default-features = false }
-lightning = { version = "0.0.117-alpha1", path = "../lightning", default-features = false }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", default-features = false }
 secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"] }
 num-traits = { version = "0.2.8", default-features = false }
 bitcoin_hashes = { version = "0.11", default-features = false }
@@ -30,6 +30,6 @@ serde = { version = "1.0.118", optional = true }
 bitcoin = { version = "0.29.0", default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", default-features = false, features = ["_test_utils"] }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", default-features = false, features = ["_test_utils"] }
 hex = "0.4"
 serde_json = { version = "1"}
diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs
index b78c28dfc29617528a55e68a3a978b3959a1f8fd..9695d79036ecc6a776e9ab10e340f0a94a7463d1 100644 (file)
@@ -30,8 +30,6 @@ compile_error!("at least one of the `std` or `no-std` features must be enabled")
 pub mod payment;
 pub mod utils;
 
-pub(crate) mod time_utils;
-
 extern crate bech32;
 extern crate bitcoin_hashes;
 #[macro_use] extern crate lightning;
diff --git a/lightning-invoice/src/time_utils.rs b/lightning-invoice/src/time_utils.rs
deleted file mode 120000 (symlink)
index 5326cff..0000000
--- a/lightning-invoice/src/time_utils.rs
+++ /dev/null
@@ -1 +0,0 @@
-../../lightning/src/util/time.rs
\ No newline at end of file
diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml
index f429931360030ea6cc2fbd97c53fca8dd1941b19..8c67a4b131b8ff583e3e30d15be000dca3f7f2a3 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-net-tokio"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
@@ -16,9 +16,9 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.29.0"
-lightning = { version = "0.0.117-alpha1", path = "../lightning" }
+lightning = { version = "0.0.117-alpha2", path = "../lightning" }
 tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
 tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
-lightning = { version = "0.0.117-alpha1", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", features = ["_test_utils"] }
diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml
index 206815301fbaf4517dfe0d9fcbaebd9abc38d09f..43d97ebbe1240f17a29bc895c88b1b4fb7a8e4d3 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-persister"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Valentine Wallace", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -15,7 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
 bitcoin = "0.29.0"
-lightning = { version = "0.0.117-alpha1", path = "../lightning" }
+lightning = { version = "0.0.117-alpha2", path = "../lightning" }
 
 [target.'cfg(windows)'.dependencies]
 windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
@@ -24,5 +24,5 @@ windows-sys = { version = "0.48.0", default-features = false, features = ["Win32
 criterion = { version = "0.4", optional = true, default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", features = ["_test_utils"] }
 bitcoin = { version = "0.29.0", default-features = false }
diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
index 42b28018fe9202ce4435a41dc87907433b156e7d..3475544849f9420398851818c0d805c4d7f48e6f 100644 (file)
@@ -67,7 +67,7 @@ impl FilesystemStore {
                }
        }
 
-       fn get_dest_dir_path(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<PathBuf> {
+       fn get_dest_dir_path(&self, primary_namespace: &str, secondary_namespace: &str) -> std::io::Result<PathBuf> {
                let mut dest_dir_path = {
                        #[cfg(target_os = "windows")]
                        {
@@ -81,9 +81,9 @@ impl FilesystemStore {
                        }
                };
 
-               dest_dir_path.push(namespace);
-               if !sub_namespace.is_empty() {
-                       dest_dir_path.push(sub_namespace);
+               dest_dir_path.push(primary_namespace);
+               if !secondary_namespace.is_empty() {
+                       dest_dir_path.push(secondary_namespace);
                }
 
                Ok(dest_dir_path)
@@ -91,10 +91,10 @@ impl FilesystemStore {
 }
 
 impl KVStore for FilesystemStore {
-       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> std::io::Result<Vec<u8>> {
-               check_namespace_key_validity(namespace, sub_namespace, Some(key), "read")?;
+       fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> std::io::Result<Vec<u8>> {
+               check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?;
 
-               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
                dest_file_path.push(key);
 
                let mut buf = Vec::new();
@@ -114,10 +114,10 @@ impl KVStore for FilesystemStore {
                Ok(buf)
        }
 
-       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> std::io::Result<()> {
-               check_namespace_key_validity(namespace, sub_namespace, Some(key), "write")?;
+       fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> std::io::Result<()> {
+               check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?;
 
-               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
                dest_file_path.push(key);
 
                let parent_directory = dest_file_path
@@ -201,10 +201,10 @@ impl KVStore for FilesystemStore {
                res
        }
 
-       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> std::io::Result<()> {
-               check_namespace_key_validity(namespace, sub_namespace, Some(key), "remove")?;
+       fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> std::io::Result<()> {
+               check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?;
 
-               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
                dest_file_path.push(key);
 
                if !dest_file_path.is_file() {
@@ -290,10 +290,10 @@ impl KVStore for FilesystemStore {
                Ok(())
        }
 
-       fn list(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<Vec<String>> {
-               check_namespace_key_validity(namespace, sub_namespace, None, "list")?;
+       fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> std::io::Result<Vec<String>> {
+               check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?;
 
-               let prefixed_dest = self.get_dest_dir_path(namespace, sub_namespace)?;
+               let prefixed_dest = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
                let mut keys = Vec::new();
 
                if !Path::new(&prefixed_dest).exists() {
@@ -320,7 +320,7 @@ impl KVStore for FilesystemStore {
 
                        let metadata = p.metadata()?;
 
-                       // We allow the presence of directories in the empty namespace and just skip them.
+                       // We allow the presence of directories in the empty primary namespace and just skip them.
                        if metadata.is_dir() {
                                continue;
                        }
@@ -328,9 +328,9 @@ impl KVStore for FilesystemStore {
                        // If we otherwise don't find a file at the given path something went wrong.
                        if !metadata.is_file() {
                                debug_assert!(false, "Failed to list keys of {}/{}: file couldn't be accessed.",
-                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                       PrintableString(primary_namespace), PrintableString(secondary_namespace));
                                let msg = format!("Failed to list keys of {}/{}: file couldn't be accessed.",
-                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                       PrintableString(primary_namespace), PrintableString(secondary_namespace));
                                return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                        }
 
@@ -342,17 +342,17 @@ impl KVStore for FilesystemStore {
                                                }
                                        } else {
                                                debug_assert!(false, "Failed to list keys of {}/{}: file path is not valid UTF-8",
-                                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                                       PrintableString(primary_namespace), PrintableString(secondary_namespace));
                                                let msg = format!("Failed to list keys of {}/{}: file path is not valid UTF-8",
-                                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                                       PrintableString(primary_namespace), PrintableString(secondary_namespace));
                                                return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                                        }
                                }
                                Err(e) => {
                                        debug_assert!(false, "Failed to list keys of {}/{}: {}",
-                                               PrintableString(namespace), PrintableString(sub_namespace), e);
+                                               PrintableString(primary_namespace), PrintableString(secondary_namespace), e);
                                        let msg = format!("Failed to list keys of {}/{}: {}",
-                                               PrintableString(namespace), PrintableString(sub_namespace), e);
+                                               PrintableString(primary_namespace), PrintableString(secondary_namespace), e);
                                        return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                                }
                        }
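With the rename, `FilesystemStore` still maps namespaces onto a directory hierarchy via `get_dest_dir_path`, roughly `<data_dir>/<primary_namespace>/<secondary_namespace>/<key>`. A small usage sketch; the exact on-disk layout is an assumption based on the code above:

use lightning::util::persist::KVStore;
use lightning_persister::fs_store::FilesystemStore;

fn demo() -> std::io::Result<()> {
	let store = FilesystemStore::new("/tmp/ldk-data".into());
	// Expected to land at /tmp/ldk-data/monitors/archived/key1 (assumed layout).
	store.write("monitors", "archived", "key1", b"hello")?;
	assert_eq!(store.read("monitors", "archived", "key1")?, b"hello");
	store.remove("monitors", "archived", "key1", false)?;
	assert!(store.list("monitors", "archived")?.is_empty());
	Ok(())
}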
diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs
index 91557500f3d1e8cf9756e7eb3424446a752825ba..360fa3492bff7196682e447f94ae52745e1e0678 100644 (file)
@@ -12,34 +12,35 @@ use std::panic::RefUnwindSafe;
 pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_store: &K) {
        let data = [42u8; 32];
 
-       let namespace = "testspace";
-       let sub_namespace = "testsubspace";
+       let primary_namespace = "testspace";
+       let secondary_namespace = "testsubspace";
        let key = "testkey";
 
        // Test the basic KVStore operations.
-       kv_store.write(namespace, sub_namespace, key, &data).unwrap();
+       kv_store.write(primary_namespace, secondary_namespace, key, &data).unwrap();
 
-       // Test empty namespace/sub_namespace is allowed, but not empty namespace and non-empty
-       // sub-namespace, and not empty key.
+       // Test empty primary_namespace/secondary_namespace is allowed, but not empty primary_namespace
+       // and non-empty secondary_namespace, and not empty key.
        kv_store.write("", "", key, &data).unwrap();
-       let res = std::panic::catch_unwind(|| kv_store.write("", sub_namespace, key, &data));
+       let res = std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, &data));
        assert!(res.is_err());
-       let res = std::panic::catch_unwind(|| kv_store.write(namespace, sub_namespace, "", &data));
+       let res = std::panic::catch_unwind(|| kv_store.write(primary_namespace, secondary_namespace, "", &data));
        assert!(res.is_err());
 
-       let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+       let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap();
        assert_eq!(listed_keys.len(), 1);
        assert_eq!(listed_keys[0], key);
 
-       let read_data = kv_store.read(namespace, sub_namespace, key).unwrap();
+       let read_data = kv_store.read(primary_namespace, secondary_namespace, key).unwrap();
        assert_eq!(data, &*read_data);
 
-       kv_store.remove(namespace, sub_namespace, key, false).unwrap();
+       kv_store.remove(primary_namespace, secondary_namespace, key, false).unwrap();
 
-       let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+       let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap();
        assert_eq!(listed_keys.len(), 0);
 
-       // Ensure we have no issue operating with namespace/sub_namespace/key being KVSTORE_NAMESPACE_KEY_MAX_LEN
+       // Ensure we have no issue operating with primary_namespace/secondary_namespace/key being
+       // KVSTORE_NAMESPACE_KEY_MAX_LEN characters long.
        let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect();
        kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap();
 
diff --git a/lightning-persister/src/utils.rs b/lightning-persister/src/utils.rs
index 54ec230de2deb90adda7dc506e96306dd4ca6360..59a615937c94b4750093ed87968dbfafd3c94e00 100644 (file)
@@ -6,51 +6,53 @@ pub(crate) fn is_valid_kvstore_str(key: &str) -> bool {
        key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
 }
 
-pub(crate) fn check_namespace_key_validity(namespace: &str, sub_namespace: &str, key: Option<&str>, operation: &str) -> Result<(), std::io::Error> {
+pub(crate) fn check_namespace_key_validity(
+       primary_namespace: &str, secondary_namespace: &str, key: Option<&str>, operation: &str)
+-> Result<(), std::io::Error> {
        if let Some(key) = key {
                if key.is_empty() {
                        debug_assert!(false, "Failed to {} {}/{}/{}: key may not be empty.", operation,
-                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                               PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
                        let msg = format!("Failed to {} {}/{}/{}: key may not be empty.", operation,
-                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                               PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
                        return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                }
 
-               if namespace.is_empty() && !sub_namespace.is_empty() {
+               if primary_namespace.is_empty() && !secondary_namespace.is_empty() {
                        debug_assert!(false,
-                               "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               "Failed to {} {}/{}/{}: primary namespace may not be empty if a non-empty secondary namespace is given.",
                                operation,
-                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                               PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
                        let msg = format!(
-                               "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.", operation,
-                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                               "Failed to {} {}/{}/{}: primary namespace may not be empty if a non-empty secondary namespace is given.", operation,
+                               PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
                        return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                }
 
-               if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) || !is_valid_kvstore_str(key) {
-                       debug_assert!(false, "Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+               if !is_valid_kvstore_str(primary_namespace) || !is_valid_kvstore_str(secondary_namespace) || !is_valid_kvstore_str(key) {
+                       debug_assert!(false, "Failed to {} {}/{}/{}: primary namespace, secondary namespace, and key must be valid.",
                                operation,
-                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
-                       let msg = format!("Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+                               PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
+                       let msg = format!("Failed to {} {}/{}/{}: primary namespace, secondary namespace, and key must be valid.",
                                operation,
-                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                               PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
                        return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                }
        } else {
-               if namespace.is_empty() && !sub_namespace.is_empty() {
+               if primary_namespace.is_empty() && !secondary_namespace.is_empty() {
                        debug_assert!(false,
-                               "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
-                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                               "Failed to {} {}/{}: primary namespace may not be empty if a non-empty secondary namespace is given.",
+                               operation, PrintableString(primary_namespace), PrintableString(secondary_namespace));
                        let msg = format!(
-                               "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
-                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                               "Failed to {} {}/{}: primary namespace may not be empty if a non-empty secondary namespace is given.",
+                               operation, PrintableString(primary_namespace), PrintableString(secondary_namespace));
                        return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                }
-               if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) {
-                       debug_assert!(false, "Failed to {} {}/{}: namespace and sub-namespace must be valid.",
-                               operation, PrintableString(namespace), PrintableString(sub_namespace));
-                       let msg = format!("Failed to {} {}/{}: namespace and sub-namespace must be valid.",
-                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+               if !is_valid_kvstore_str(primary_namespace) || !is_valid_kvstore_str(secondary_namespace) {
+                       debug_assert!(false, "Failed to {} {}/{}: primary namespace and secondary namespace must be valid.",
+                               operation, PrintableString(primary_namespace), PrintableString(secondary_namespace));
+                       let msg = format!("Failed to {} {}/{}: primary namespace and secondary namespace must be valid.",
+                               operation, PrintableString(primary_namespace), PrintableString(secondary_namespace));
                        return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
                }
        }
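In short, the checks above accept both namespaces empty, or an empty secondary namespace alone, but reject an empty primary namespace paired with a non-empty secondary one, and reject empty keys. A compact illustration (in release builds these surface as `Err`; debug builds hit the `debug_assert!` first, hence the `catch_unwind` in the test utilities above):

use lightning::util::persist::KVStore;

fn probe<K: KVStore>(store: &K) {
	assert!(store.write("", "", "key", b"v").is_ok());               // both namespaces empty: OK
	assert!(store.write("primary", "", "key", b"v").is_ok());        // empty secondary only: OK
	assert!(store.write("", "secondary", "key", b"v").is_err());     // empty primary, non-empty secondary: rejected
	assert!(store.write("primary", "secondary", "", b"v").is_err()); // empty key: rejected
}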
diff --git a/lightning-rapid-gossip-sync/Cargo.toml b/lightning-rapid-gossip-sync/Cargo.toml
index fc4bd36bf4c649e6056c3e329451dee974983532..85f3eb5f32cb49e6e23bd462c8197a32a6bbfbfa 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-rapid-gossip-sync"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Arik Sosman <git@arik.io>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -15,11 +15,11 @@ no-std = ["lightning/no-std"]
 std = ["lightning/std"]
 
 [dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", default-features = false }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", default-features = false }
 bitcoin = { version = "0.29.0", default-features = false }
 
 [target.'cfg(ldk_bench)'.dependencies]
 criterion = { version = "0.4", optional = true, default-features = false }
 
 [dev-dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", features = ["_test_utils"] }
diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml
index aad057f86c15ed1c35100a1be20f03d156e5391c..dddbe4b516b89363f3fdcd6df20cdf9c3116ca4d 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-transaction-sync"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Elias Rohrer"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -21,7 +21,7 @@ esplora-blocking = ["esplora-client/blocking"]
 async-interface = []
 
 [dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", default-features = false }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", default-features = false }
 bitcoin = { version = "0.29.0", default-features = false }
 bdk-macros = "0.6"
 futures = { version = "0.3", optional = true }
@@ -29,7 +29,7 @@ esplora-client = { version = "0.4", default-features = false, optional = true }
 reqwest = { version = "0.11", optional = true, default-features = false, features = ["json"] }
 
 [dev-dependencies]
-lightning = { version = "0.0.117-alpha1", path = "../lightning", features = ["std"] }
+lightning = { version = "0.0.117-alpha2", path = "../lightning", features = ["std"] }
 electrsd = { version = "0.22.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_23_0"] }
 electrum-client = "0.12.0"
 tokio = { version = "1.14.0", features = ["full"] }
diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml
index 773467b580e5c531e4cbfc3e0f6de6ae4f101f65..a94716e7df0ae4df91e30c5cef73221d06267950 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning"
-version = "0.0.117-alpha1"
+version = "0.0.117-alpha2"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs
index 382ffac3e8e886c303207798fafe528c78406b0d..c888118bcba4e13b3400027249e8e14eb4e535da 100644 (file)
@@ -551,6 +551,32 @@ impl PackageSolvingData {
                        _ => { mem::discriminant(self) == mem::discriminant(&input) }
                }
        }
+       fn as_tx_input(&self, previous_output: BitcoinOutPoint) -> TxIn {
+               let sequence = match self {
+                       PackageSolvingData::RevokedOutput(_) => Sequence::ENABLE_RBF_NO_LOCKTIME,
+                       PackageSolvingData::RevokedHTLCOutput(_) => Sequence::ENABLE_RBF_NO_LOCKTIME,
+                       PackageSolvingData::CounterpartyOfferedHTLCOutput(outp) => if outp.channel_type_features.supports_anchors_zero_fee_htlc_tx() {
+                               Sequence::from_consensus(1)
+                       } else {
+                               Sequence::ENABLE_RBF_NO_LOCKTIME
+                       },
+                       PackageSolvingData::CounterpartyReceivedHTLCOutput(outp) => if outp.channel_type_features.supports_anchors_zero_fee_htlc_tx() {
+                               Sequence::from_consensus(1)
+                       } else {
+                               Sequence::ENABLE_RBF_NO_LOCKTIME
+                       },
+                       _ => {
+                               debug_assert!(false, "This should not be reachable by 'untractable' or 'malleable with external funding' packages");
+                               Sequence::ENABLE_RBF_NO_LOCKTIME
+                       },
+               };
+               TxIn {
+                       previous_output,
+                       script_sig: Script::new(),
+                       sequence,
+                       witness: Witness::new(),
+               }
+       }
        fn finalize_input<Signer: WriteableEcdsaChannelSigner>(&self, bumped_tx: &mut Transaction, i: usize, onchain_handler: &mut OnchainTxHandler<Signer>) -> bool {
                match self {
                        PackageSolvingData::RevokedOutput(ref outp) => {
@@ -895,13 +921,8 @@ impl PackageTemplate {
                                value,
                        }],
                };
-               for (outpoint, _) in self.inputs.iter() {
-                       bumped_tx.input.push(TxIn {
-                               previous_output: *outpoint,
-                               script_sig: Script::new(),
-                               sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
-                               witness: Witness::new(),
-                       });
+               for (outpoint, outp) in self.inputs.iter() {
+                       bumped_tx.input.push(outp.as_tx_input(*outpoint));
                }
                for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
                        log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
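The per-input sequence matters because, on anchor channels, the counterparty's HTLC outputs carry a 1-block CSV delay, so claims must set nSequence to 1; all other malleable inputs simply opt into RBF with no locktime. A distilled version of the branch above, as an illustrative sketch:

use bitcoin::{OutPoint, Script, Sequence, TxIn, Witness};

fn htlc_claim_input(previous_output: OutPoint, anchors: bool) -> TxIn {
	let sequence = if anchors {
		// Satisfies the 1-block CSV on anchor-channel HTLC outputs.
		Sequence::from_consensus(1)
	} else {
		Sequence::ENABLE_RBF_NO_LOCKTIME
	};
	TxIn { previous_output, script_sig: Script::new(), sequence, witness: Witness::new() }
}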
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index bb98e271597309d057ca4712b394e302b35cddc3..269887a3dbac06fc1ba24dcda084f5c2c2115de2 100644 (file)
@@ -199,6 +199,9 @@ pub enum ClosureReason {
        /// The counterparty requested a cooperative close of a channel that had not been funded yet.
        /// The channel has been immediately closed.
        CounterpartyCoopClosedUnfundedChannel,
+       /// Another channel in the same funding batch closed before the funding transaction
+       /// was ready to be broadcast.
+       FundingBatchClosure,
 }
 
 impl core::fmt::Display for ClosureReason {
@@ -219,6 +222,7 @@ impl core::fmt::Display for ClosureReason {
                        ClosureReason::DisconnectedPeer => f.write_str("the peer disconnected prior to the channel being funded"),
                        ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
                        ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
+                       ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
                }
        }
 }
@@ -233,6 +237,7 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
        (10, DisconnectedPeer) => {},
        (12, OutdatedChannelManager) => {},
        (13, CounterpartyCoopClosedUnfundedChannel) => {},
+       (15, FundingBatchClosure) => {}
 );
 
 /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
@@ -844,6 +849,8 @@ pub enum Event {
        },
        /// Used to indicate to the user that they can abandon the funding transaction and recycle the
        /// inputs for another purpose.
+       ///
+       /// This event is not guaranteed to be generated for channels that are closed due to a restart.
        DiscardFunding {
                /// The channel_id of the channel which has been closed.
                channel_id: ChannelId,
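Downstream handlers can match on the new variant; when it fires, a `DiscardFunding` event for the shared batch funding transaction may follow (though, per the note above, not across restarts). A hedged sketch of such a handler:

use lightning::events::{ClosureReason, Event};

fn handle_event(event: Event) {
	if let Event::ChannelClosed { channel_id, reason, .. } = event {
		match reason {
			// Another channel in the same funding batch failed first; the
			// batch funding transaction will not be broadcast.
			ClosureReason::FundingBatchClosure =>
				println!("channel {} closed with its funding batch", channel_id),
			other => println!("channel {} closed: {}", channel_id, other),
		}
	}
}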
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 796c041f8185d78d68972370726fdccb96fb4fd5..55e56be2a79377079377591f14ad1fef33311f17 100644 (file)
@@ -300,9 +300,24 @@ enum ChannelState {
        /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
        /// to drop us, but we store this anyway.
        ShutdownComplete = 4096,
+       /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
+       /// broadcasting of the funding transaction is being held until all channels in the batch
+       /// have received funding_signed and have their monitors persisted.
+       WaitingForBatch = 1 << 13,
 }
-const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
+const BOTH_SIDES_SHUTDOWN_MASK: u32 =
+       ChannelState::LocalShutdownSent as u32 |
+       ChannelState::RemoteShutdownSent as u32;
+const MULTI_STATE_FLAGS: u32 =
+       BOTH_SIDES_SHUTDOWN_MASK |
+       ChannelState::PeerDisconnected as u32 |
+       ChannelState::MonitorUpdateInProgress as u32;
+const STATE_FLAGS: u32 =
+       MULTI_STATE_FLAGS |
+       ChannelState::TheirChannelReady as u32 |
+       ChannelState::OurChannelReady as u32 |
+       ChannelState::AwaitingRemoteRevoke as u32 |
+       ChannelState::WaitingForBatch as u32;
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
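`channel_state` mixes an ordered progression in its low bits with independent flag bits above, so ordered comparisons must first clear the flags: with `WaitingForBatch` at bit 13, a state such as `FundingSent | WaitingForBatch` would otherwise compare as being past `ChannelReady`. That is the idiom behind the repeated `& !STATE_FLAGS` masking in the hunks below, illustrated with the constants just defined:

fn funding_sent_or_later(channel_state: u32) -> bool {
	// Clear all flag bits before comparing the ordered state value.
	channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32
}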
 
@@ -527,12 +542,15 @@ pub(super) struct ReestablishResponses {
 
 /// The return type of `force_shutdown`
 ///
-/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
-/// followed by a list of HTLCs to fail back in the form of the (source, payment hash, and this
-/// channel's counterparty_node_id and channel_id).
+/// Contains a tuple with the following:
+/// - An optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
+/// - A list of HTLCs to fail back, each in the form of a (source, payment hash, this channel's
+///   counterparty_node_id, channel_id) tuple.
+/// - An optional transaction id identifying a corresponding batch funding transaction.
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
-       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>
+       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
+       Option<Txid>
 );
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -821,6 +839,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
        funding_transaction: Option<Transaction>,
+       is_batch_funding: Option<()>,
 
        counterparty_cur_commitment_point: Option<PublicKey>,
        counterparty_prev_commitment_point: Option<PublicKey>,
@@ -945,7 +964,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns true if we've ever received a message from the remote end for this Channel
        pub fn have_received_message(&self) -> bool {
-               self.channel_state > (ChannelState::OurInitSent as u32)
+               self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
        }
 
        /// Returns true if this channel is fully established and not known to be closing.
@@ -1161,7 +1180,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        // Checks whether we should emit a `ChannelPending` event.
        pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
-               self.is_funding_initiated() && !self.channel_pending_event_emitted
+               self.is_funding_broadcast() && !self.channel_pending_event_emitted
        }
 
        // Returns whether we already emitted a `ChannelPending` event.
@@ -1220,9 +1239,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                did_channel_update
        }
 
-       /// Returns true if funding_created was sent/received.
-       pub fn is_funding_initiated(&self) -> bool {
-               self.channel_state >= ChannelState::FundingSent as u32
+       /// Returns true if funding_signed was sent/received and the
+       /// funding transaction has been broadcast if necessary.
+       pub fn is_funding_broadcast(&self) -> bool {
+               self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
+                       self.channel_state & ChannelState::WaitingForBatch as u32 == 0
        }
 
        /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
@@ -1952,15 +1973,41 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                res
        }
 
-       /// Returns transaction if there is pending funding transaction that is yet to broadcast
-       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
-               if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
-                       self.funding_transaction.clone()
+       fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
+               where F: Fn() -> Option<O> {
+               if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
+                  self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
+                       f()
                } else {
                        None
                }
        }
 
+       /// Returns the transaction if there is a pending funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+               self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
+       }
+
+       /// Returns the transaction ID if there is a pending funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
+               self.if_unbroadcasted_funding(||
+                       self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
+               )
+       }
+
+       /// Returns whether the channel is funded in a batch.
+       pub fn is_batch_funding(&self) -> bool {
+               self.is_batch_funding.is_some()
+       }
+
+       /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
+       /// broadcast.
+       pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
+               self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
+       }
+
        /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
        /// shutdown of this channel - no more calls into this Channel may be made afterwards except
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
@@ -2001,10 +2048,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                }))
                        } else { None }
                } else { None };
+               let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
                self.update_time_counter += 1;
-               (monitor_update, dropped_outbound_htlcs)
+               (monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid)
        }
 }
 
@@ -2574,7 +2622,11 @@ impl<SP: Deref> Channel<SP> where
                        counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 
		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
-               self.context.channel_state = ChannelState::FundingSent as u32;
+               if self.context.is_batch_funding() {
+                       self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
+               } else {
+                       self.context.channel_state = ChannelState::FundingSent as u32;
+               }
                self.context.cur_holder_commitment_transaction_number -= 1;
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
@@ -2585,6 +2637,15 @@ impl<SP: Deref> Channel<SP> where
                Ok(channel_monitor)
        }
 
+       /// Updates the state of the channel to indicate that all channels in the batch have received
+       /// funding_signed and persisted their monitors.
+       /// The funding transaction is consequently allowed to be broadcast, and the channel can be
+       /// treated as a non-batch channel going forward.
+       pub fn set_batch_ready(&mut self) {
+               self.context.is_batch_funding = None;
+               self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
+       }
+
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
@@ -2612,7 +2673,13 @@ impl<SP: Deref> Channel<SP> where
 
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
 
-               if non_shutdown_state == ChannelState::FundingSent as u32 {
+               // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
+               // batch, but we can receive channel_ready messages.
+               debug_assert!(
+                       non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
+                       non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
+               );
+               if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
                        self.context.channel_state |= ChannelState::TheirChannelReady as u32;
                } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
@@ -3111,7 +3178,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
+               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
                   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
@@ -3585,17 +3652,17 @@ impl<SP: Deref> Channel<SP> where
        /// resent.
        /// No further message handling calls may be made until a channel_reestablish dance has
        /// completed.
-       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L)  where L::Target: Logger {
+       /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
+       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
                assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-                       return;
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+                       return Err(());
                }
 
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
                        // While the below code should be idempotent, it's simpler to just return early, as
                        // redundant disconnect events can fire, though they should be rare.
-                       return;
+                       return Ok(());
                }
 
                if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
@@ -3656,6 +3723,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.channel_state |= ChannelState::PeerDisconnected as u32;
                log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
+               Ok(())
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
@@ -3701,12 +3769,12 @@ impl<SP: Deref> Channel<SP> where
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
                // first received the funding_signed.
                let mut funding_broadcastable =
-                       if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+                       if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
                                self.context.funding_transaction.take()
                        } else { None };
                // That said, if the funding transaction is already confirmed (ie we're active with a
                // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
-               if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+               if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
                        funding_broadcastable = None;
                }
 
@@ -4209,7 +4277,7 @@ impl<SP: Deref> Channel<SP> where
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
@@ -4603,7 +4671,7 @@ impl<SP: Deref> Channel<SP> where
        pub fn is_awaiting_initial_mon_persist(&self) -> bool {
                if !self.is_awaiting_monitor_update() { return false; }
                if self.context.channel_state &
-                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
+                       !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
                                == ChannelState::FundingSent as u32 {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
                        // FundingSent set, though our peer could have sent their channel_ready.
@@ -4634,7 +4702,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Returns true if our channel_ready has been sent
        pub fn is_our_channel_ready(&self) -> bool {
-               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
+               (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
        }
 
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
@@ -4683,6 +4751,8 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
+               // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
+               // channel_ready until the entire batch is ready.
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
                let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
                        self.context.channel_state |= ChannelState::OurChannelReady as u32;
@@ -4695,7 +4765,7 @@ impl<SP: Deref> Channel<SP> where
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
-                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
+                       if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
                                // We should never see a funding transaction on-chain until we've received
                                // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
                                // an inbound channel - before that we have no known funding TXID). The fuzzer,
@@ -4865,7 +4935,7 @@ impl<SP: Deref> Channel<SP> where
                }
 
                let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
-               if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
+               if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
                   (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
                        let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
                        if self.context.funding_tx_confirmation_height == 0 {
@@ -4893,7 +4963,7 @@ impl<SP: Deref> Channel<SP> where
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
                        log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
-                       assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
+                       assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
                        assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
                        return Err(ClosureReason::FundingTimedOut);
                }
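As context for the funding-confirmation checks above, the count follows the usual convention that a transaction confirmed in the tip block has one confirmation. A self-contained sketch of the arithmetic, with 0 used as the diff's "not yet confirmed" sentinel:

// Sketch of `height - funding_tx_confirmation_height + 1` from the hunk above.
fn confirmations(tip_height: u32, confirmation_height: u32) -> i64 {
    if confirmation_height == 0 { return 0; } // sentinel: not yet confirmed
    tip_height as i64 - confirmation_height as i64 + 1
}

fn main() {
    assert_eq!(confirmations(100, 100), 1); // confirmed in the tip block itself
    assert_eq!(confirmations(105, 100), 6);
    assert_eq!(confirmations(100, 0), 0);   // unconfirmed
}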
@@ -5468,9 +5538,6 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
-               if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
-               }
                self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
                        fee_base_msat: msg.contents.fee_base_msat,
                        fee_proportional_millionths: msg.contents.fee_proportional_millionths,
@@ -5513,7 +5580,7 @@ impl<SP: Deref> Channel<SP> where
                // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
                // script is set, we just force-close and call it a day.
                let mut chan_closed = false;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        chan_closed = true;
                }
 
@@ -5542,7 +5609,7 @@ impl<SP: Deref> Channel<SP> where
 
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
+               if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
                        self.context.channel_state = ChannelState::ShutdownComplete as u32;
                } else {
                        self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
@@ -5765,6 +5832,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                        channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
+                               is_batch_funding: None,
 
                                counterparty_cur_commitment_point: None,
                                counterparty_prev_commitment_point: None,
@@ -5825,7 +5893,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
+       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
        -> Result<(Channel<SP>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
@@ -5867,6 +5935,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                }
 
                self.context.funding_transaction = Some(funding_transaction);
+               self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
 
                let channel = Channel {
                        context: self.context,
@@ -6416,6 +6485,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                        channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
+                               is_batch_funding: None,
 
                                counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
                                counterparty_prev_commitment_point: None,
@@ -7031,6 +7101,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (31, channel_pending_event_emitted, option),
                        (35, pending_outbound_skimmed_fees, optional_vec),
                        (37, holding_cell_skimmed_fees, optional_vec),
+                       (38, self.context.is_batch_funding, option),
                });
 
                Ok(())
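The new (38, is_batch_funding, option) record encodes a boolean purely as the presence or absence of an Option<()> TLV, matching the Some(()).filter(|_| is_batch_funding) conversion earlier in the diff; being an even TLV type, it should also keep older readers from silently dropping batch state under the usual even/odd TLV convention. A tiny sketch of the idiom:

// Sketch of the Option<()> presence-flag idiom used for `is_batch_funding`.
fn to_field(is_batch_funding: bool) -> Option<()> {
    // `true` writes the TLV record, `false` omits it entirely.
    Some(()).filter(|_| is_batch_funding)
}

fn main() {
    assert_eq!(to_field(true), Some(()));
    assert_eq!(to_field(false), None);
    // On read, mere presence of the record marks the channel as batch-funded.
    assert!(to_field(true).is_some());
}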
@@ -7253,7 +7324,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                };
 
                let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
-               let funding_transaction = Readable::read(reader)?;
+               let funding_transaction: Option<Transaction> = Readable::read(reader)?;
 
                let counterparty_cur_commitment_point = Readable::read(reader)?;
 
@@ -7314,6 +7385,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
                let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
 
+               let mut is_batch_funding: Option<()> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -7339,6 +7412,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (31, channel_pending_event_emitted, option),
                        (35, pending_outbound_skimmed_fees_opt, optional_vec),
                        (37, holding_cell_skimmed_fees_opt, optional_vec),
+                       (38, is_batch_funding, option),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7346,7 +7420,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        // If we've gotten to the funding stage of the channel, populate the signer with its
                        // required channel parameters.
                        let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
-                       if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
+                       if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
                                holder_signer.provide_channel_parameters(&channel_parameters);
                        }
                        (channel_keys_id, holder_signer)
@@ -7496,6 +7570,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                                channel_transaction_parameters: channel_parameters,
                                funding_transaction,
+                               is_batch_funding,
 
                                counterparty_cur_commitment_point,
                                counterparty_prev_commitment_point,
@@ -7549,7 +7624,7 @@ mod tests {
        use crate::ln::PaymentHash;
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+       use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use crate::ln::features::ChannelTypeFeatures;
        use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
@@ -7728,7 +7803,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7855,7 +7930,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7863,7 +7938,7 @@ mod tests {
 
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
-               node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+               assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
                let msg = node_b_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
@@ -7871,7 +7946,7 @@ mod tests {
 
                // Check that the commitment point in Node A's channel_reestablish message
                // is sane.
-               node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
+               assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
                let msg = node_a_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
@@ -8043,7 +8118,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -9023,4 +9098,146 @@ mod tests {
                );
                assert!(res.is_err());
        }
+
+       #[test]
+       fn test_waiting_for_batch() {
+               let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+               let logger = test_utils::TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
+               let chain_hash = genesis_block(network).header.block_hash();
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+               let mut config = UserConfig::default();
+               // Set trust_own_funding_0conf so the test can verify that we don't send
+               // channel_ready for a channel in a batch before all channels in the batch are ready.
+               config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+               // Create a channel from node a to node b that will be part of batch funding.
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest,
+                       &&keys_provider,
+                       &&keys_provider,
+                       node_b_node_id,
+                       &channelmanager::provided_init_features(&config),
+                       10000000,
+                       100000,
+                       42,
+                       &config,
+                       0,
+                       42,
+               ).unwrap();
+
+               let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+               let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest,
+                       &&keys_provider,
+                       &&keys_provider,
+                       node_b_node_id,
+                       &channelmanager::provided_channel_type_features(&config),
+                       &channelmanager::provided_init_features(&config),
+                       &open_channel_msg,
+                       7,
+                       &config,
+                       0,
+                       &&logger,
+                       true,  // Allow node b to send a 0conf channel_ready.
+               ).unwrap();
+
+               let accept_channel_msg = node_b_chan.accept_inbound_channel();
+               node_a_chan.accept_channel(
+                       &accept_channel_msg,
+                       &config.channel_handshake_limits,
+                       &channelmanager::provided_init_features(&config),
+               ).unwrap();
+
+               // Fund the channel with a batch funding transaction.
+               let output_script = node_a_chan.context.get_funding_redeemscript();
+               let tx = Transaction {
+                       version: 1,
+                       lock_time: PackedLockTime::ZERO,
+                       input: Vec::new(),
+                       output: vec![
+                               TxOut {
+                                       value: 10000000, script_pubkey: output_script.clone(),
+                               },
+                               TxOut {
+                                       value: 10000000, script_pubkey: Builder::new().into_script(),
+                               },
+                       ]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
+                       tx.clone(),
+                       funding_outpoint,
+                       true,
+                       &&logger,
+               ).map_err(|_| ()).unwrap();
+               let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
+                       &funding_created_msg,
+                       best_block,
+                       &&keys_provider,
+                       &&logger,
+               ).map_err(|_| ()).unwrap();
+               let node_b_updates = node_b_chan.monitor_updating_restored(
+                       &&logger,
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       0,
+               );
+
+               // Receive funding_signed, but the channel will be configured to hold off on sending
+               // channel_ready and broadcasting the funding transaction until the batch is ready.
+               let _ = node_a_chan.funding_signed(
+                       &funding_signed_msg,
+                       best_block,
+                       &&keys_provider,
+                       &&logger,
+               ).unwrap();
+               let node_a_updates = node_a_chan.monitor_updating_restored(
+                       &&logger,
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       0,
+               );
+               // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
+               // as broadcasting the funding transaction depends on all channels in the batch becoming ready.
+               assert!(node_a_updates.channel_ready.is_none());
+               assert!(node_a_updates.funding_broadcastable.is_none());
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::WaitingForBatch as u32,
+               );
+
+               // It is possible to receive a 0conf channel_ready from the remote node.
+               node_a_chan.channel_ready(
+                       &node_b_updates.channel_ready.unwrap(),
+                       &&keys_provider,
+                       chain_hash,
+                       &config,
+                       &best_block,
+                       &&logger,
+               ).unwrap();
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::WaitingForBatch as u32 |
+                       ChannelState::TheirChannelReady as u32,
+               );
+
+               // ChannelState::WaitingForBatch is only cleared when ChannelManager calls set_batch_ready.
+               node_a_chan.set_batch_ready();
+               assert_eq!(
+                       node_a_chan.context.channel_state,
+                       ChannelState::FundingSent as u32 |
+                       ChannelState::TheirChannelReady as u32,
+               );
+               assert!(node_a_chan.check_get_channel_ready(0).is_some());
+       }
 }
index 3ef57c5b87f024e8861ecabf7226c4f6eae19ad7..392f3e1cb052cf196b50b2b7e3734b62ab167e4c 100644 (file)
@@ -64,7 +64,7 @@ use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, Maybe
 use crate::util::logger::{Level, Logger};
 use crate::util::errors::APIError;
 
-use alloc::collections::BTreeMap;
+use alloc::collections::{btree_map, BTreeMap};
 
 use crate::io;
 use crate::prelude::*;
@@ -1201,6 +1201,12 @@ where
        /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
        /// Notifier the lock contains sends out a notification when the lock is released.
        total_consistency_lock: RwLock<()>,
+       /// Tracks the progress of channels going through batch funding by recording, for each
+       /// channel, whether funding_signed has been received and the monitor has been persisted.
+       ///
+       /// This information does not need to be persisted as funding nodes can forget
+       /// unfunded channels upon disconnection.
+       funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
 
        background_events_processed_since_startup: AtomicBool,
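A rough sketch of the bookkeeping this map implies — the aliases below are stand-ins for the real Txid, ChannelId and PublicKey types, and the surrounding ChannelManager plumbing is elided:

use std::collections::BTreeMap;

// Stand-ins for bitcoin::Txid, ChannelId and secp256k1::PublicKey.
type Txid = [u8; 32];
type ChannelId = [u8; 32];
type PublicKey = [u8; 33];

fn main() {
    // txid -> per-channel (channel_id, counterparty, monitor persisted?) progress.
    let mut states: BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>> = BTreeMap::new();
    let txid = [0u8; 32];
    states.insert(txid, vec![
        ([1u8; 32], [2u8; 33], false),
        ([3u8; 32], [4u8; 33], false),
    ]);

    // funding_signed received and monitor persisted for the first channel...
    if let Some(batch) = states.get_mut(&txid) { batch[0].2 = true; }
    // ...but the shared funding tx may broadcast only once every entry completes.
    let batch_completed = states.get(&txid)
        .map_or(false, |batch| batch.iter().all(|&(_, _, done)| done));
    assert!(!batch_completed);
}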
 
@@ -1788,7 +1794,7 @@ macro_rules! handle_error {
                                let mut msg_events = Vec::with_capacity(2);
 
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
-                                       $self.finish_force_close_channel(shutdown_res);
+                                       $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
                                                msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
@@ -2025,9 +2031,54 @@ macro_rules! handle_monitor_update_completion {
                }
 
                let channel_id = $chan.context.channel_id();
+               let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
                core::mem::drop($peer_state_lock);
                core::mem::drop($per_peer_state_lock);
 
+               // If the channel belongs to a batch funding transaction, the progress of the batch
+               // should be updated as we have received funding_signed and persisted the monitor.
+               if let Some(txid) = unbroadcasted_batch_funding_txid {
+                       let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
+                       let mut batch_completed = false;
+                       if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
+                               let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
+                                       *chan_id == channel_id &&
+                                       *pubkey == counterparty_node_id
+                               ));
+                               if let Some(channel_state) = channel_state {
+                                       channel_state.2 = true;
+                               } else {
+                                       debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
+                               }
+                               batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
+                       } else {
+                               debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
+                       }
+
+                       // Once all channels in a batched funding transaction have become ready, it is no
+                       // longer necessary to track the batch's progress and the state of its channels can be updated.
+                       if batch_completed {
+                               let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
+                               let per_peer_state = $self.per_peer_state.read().unwrap();
+                               let mut batch_funding_tx = None;
+                               for (channel_id, counterparty_node_id, _) in removed_batch_state {
+                                       if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                               let mut peer_state = peer_state_mutex.lock().unwrap();
+                                               if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+                                                       batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
+                                                       chan.set_batch_ready();
+                                                       let mut pending_events = $self.pending_events.lock().unwrap();
+                                                       emit_channel_pending_event!(pending_events, chan);
+                                               }
+                                       }
+                               }
+                               if let Some(tx) = batch_funding_tx {
+                                       log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.txid());
+                                       $self.tx_broadcaster.broadcast_transactions(&[&tx]);
+                               }
+                       }
+               }
+
                $self.handle_monitor_update_completion_actions(update_actions);
 
                if let Some(forwards) = htlc_forwards {
@@ -2230,9 +2281,9 @@ where
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
                        background_events_processed_since_startup: AtomicBool::new(false),
-
                        event_persist_notifier: Notifier::new(),
                        needs_persist_flag: AtomicBool::new(false),
+                       funding_batch_states: Mutex::new(BTreeMap::new()),
 
                        entropy_source,
                        node_signer,
@@ -2497,6 +2548,7 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+               let mut shutdown_result = None;
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2511,6 +2563,7 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let their_features = &peer_state.latest_features;
+                                               let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
                                                let (shutdown_msg, mut monitor_update_opt, htlcs) =
                                                        chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
                                                failed_htlcs = htlcs;
@@ -2541,6 +2594,7 @@ where
                                                                        });
                                                                }
                                                                self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+                                                               shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
                                                        }
                                                }
                                                break;
@@ -2562,6 +2616,10 @@ where
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
 
+               if let Some(shutdown_result) = shutdown_result {
+                       self.finish_close_channel(shutdown_result);
+               }
+
                Ok(())
        }
 
@@ -2626,14 +2684,14 @@ where
                self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
 
-       fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
+       fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
                debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
                #[cfg(debug_assertions)]
                for (_, peer) in self.per_peer_state.read().unwrap().iter() {
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
 
-               let (monitor_update_option, mut failed_htlcs) = shutdown_res;
+               let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
@@ -2648,6 +2706,31 @@ where
                        // ignore the result here.
                        let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
                }
+               let mut shutdown_results = Vec::new();
+               if let Some(txid) = unbroadcasted_batch_funding_txid {
+                       let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
+                       let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       let mut has_uncompleted_channel = None;
+                       for (channel_id, counterparty_node_id, state) in affected_channels {
+                               if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                       let mut peer_state = peer_state_mutex.lock().unwrap();
+                                       if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
+                                               update_maps_on_chan_removal!(self, &chan.context());
+                                               self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
+                                               shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                       }
+                               }
+                               has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
+                       }
+                       debug_assert!(
+                               has_uncompleted_channel.unwrap_or(true),
+                               "Closing a batch where all channels have completed initial monitor update",
+                       );
+               }
+               for shutdown_result in shutdown_results.drain(..) {
+                       self.finish_close_channel(shutdown_result);
+               }
        }
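The has_uncompleted_channel fold above distinguishes "no batch entries seen" (None) from "every entry completed" (Some(false)), so the debug assertion fires only when a fully completed batch is being closed. A minimal sketch of the accumulation, assuming plain per-channel completed flags:

fn main() {
    let completed_flags = [true, false, true]; // per-channel monitor-persisted state
    let mut has_uncompleted_channel: Option<bool> = None;
    for state in completed_flags {
        has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
    }
    // Some(true): at least one channel had not completed its initial monitor
    // update, which is the expected situation when force-closing a batch.
    assert_eq!(has_uncompleted_channel, Some(true));
}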
 
        /// `peer_msg` should be set when we receive a message from a peer, but not set when the
@@ -2672,11 +2755,11 @@ where
                                mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
-                                               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+                                               self.finish_close_channel(chan.context.force_shutdown(broadcast));
                                                (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
                                        },
                                        ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
-                                               self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false));
+                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
@@ -3537,7 +3620,7 @@ where
        ///
        /// See [`ChannelManager::send_preflight_probes`] for more information.
        pub fn send_spontaneous_preflight_probes(
-               &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32, 
+               &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
                liquidity_limit_multiplier: Option<u64>,
        ) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
                let payment_params =
@@ -3644,8 +3727,9 @@ where
 
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
-       fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
-               &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+       fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
+               &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
+               mut find_funding_output: FundingOutput,
        ) -> Result<(), APIError> {
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -3657,7 +3741,7 @@ where
                        Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
-                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
+                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
                                                let user_id = chan.context.get_user_id();
@@ -3713,7 +3797,7 @@ where
 
        #[cfg(test)]
        pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
-               self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
+               self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_, tx| {
                        Ok(OutPoint { txid: tx.txid(), index: output_index })
                })
        }
@@ -3749,17 +3833,37 @@ where
        /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
        /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
        pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
+               self.batch_funding_transaction_generated(&[(temporary_channel_id, counterparty_node_id)], funding_transaction)
+       }
+
+       /// Call this upon creation of a batch funding transaction for the given channels.
+       ///
+       /// Return values are identical to [`Self::funding_transaction_generated`], with respect
+       /// to each individual channel and transaction output.
+       ///
+       /// Do NOT broadcast the funding transaction yourself. This batch funding transaction
+       /// will only be broadcast when we have safely received and persisted the counterparty's
+       /// signature for each channel.
+       ///
+       /// If there is an error, all channels in the batch are to be considered closed.
+       pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               let mut result = Ok(());
 
                if !funding_transaction.is_coin_base() {
                        for inp in funding_transaction.input.iter() {
                                if inp.witness.is_empty() {
-                                       return Err(APIError::APIMisuseError {
+                                       result = result.and(Err(APIError::APIMisuseError {
                                                err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
-                                       });
+                                       }));
                                }
                        }
                }
+               if funding_transaction.output.len() > u16::max_value() as usize {
+                       result = result.and(Err(APIError::APIMisuseError {
+                               err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+                       }));
+               }
                {
                        let height = self.best_block.read().unwrap().height();
                        // Transactions are evaluated as final by network mempools if their locktime is strictly
@@ -3767,37 +3871,93 @@ where
                        // node might not have perfect sync about their blockchain views. Thus, if the wallet
                        // module is ahead of LDK, only allow one more block of headroom.
                        if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 {
-                               return Err(APIError::APIMisuseError {
+                               result = result.and(Err(APIError::APIMisuseError {
                                        err: "Funding transaction absolute timelock is non-final".to_owned()
-                               });
+                               }));
                        }
                }
-               self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
-                       if tx.output.len() > u16::max_value() as usize {
-                               return Err(APIError::APIMisuseError {
-                                       err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
-                               });
-                       }
 
-                       let mut output_index = None;
-                       let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
-                       for (idx, outp) in tx.output.iter().enumerate() {
-                               if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
-                                       if output_index.is_some() {
+               let txid = funding_transaction.txid();
+               let is_batch_funding = temporary_channels.len() > 1;
+               let mut funding_batch_states = if is_batch_funding {
+                       Some(self.funding_batch_states.lock().unwrap())
+               } else {
+                       None
+               };
+               let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
+                       match states.entry(txid) {
+                               btree_map::Entry::Occupied(_) => {
+                                       result = result.clone().and(Err(APIError::APIMisuseError {
+                                               err: "Batch funding transaction with the same txid already exists".to_owned()
+                                       }));
+                                       None
+                               },
+                               btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
+                       }
+               });
+               for (channel_idx, &(temporary_channel_id, counterparty_node_id)) in temporary_channels.iter().enumerate() {
+                       result = result.and_then(|_| self.funding_transaction_generated_intern(
+                               temporary_channel_id,
+                               counterparty_node_id,
+                               funding_transaction.clone(),
+                               is_batch_funding,
+                               |chan, tx| {
+                                       let mut output_index = None;
+                                       let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
+                                       for (idx, outp) in tx.output.iter().enumerate() {
+                                               if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
+                                                       if output_index.is_some() {
+                                                               return Err(APIError::APIMisuseError {
+                                                                       err: "Multiple outputs matched the expected script and value".to_owned()
+                                                               });
+                                                       }
+                                                       output_index = Some(idx as u16);
+                                               }
+                                       }
+                                       if output_index.is_none() {
                                                return Err(APIError::APIMisuseError {
-                                                       err: "Multiple outputs matched the expected script and value".to_owned()
+                                                       err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
                                                });
                                        }
-                                       output_index = Some(idx as u16);
+                                       let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
+                                       if let Some(funding_batch_state) = funding_batch_state.as_mut() {
+                                               funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+                                       }
+                                       Ok(outpoint)
+                               })
+                       );
+               }
+               if let Err(ref e) = result {
+                       // Remaining channels need to be removed on any error.
+                       let e = format!("Error in transaction funding: {:?}", e);
+                       let mut channels_to_remove = Vec::new();
+                       channels_to_remove.extend(funding_batch_states.as_mut()
+                               .and_then(|states| states.remove(&txid))
+                               .into_iter().flatten()
+                               .map(|(chan_id, node_id, _state)| (chan_id, node_id))
+                       );
+                       channels_to_remove.extend(temporary_channels.iter()
+                               .map(|(&chan_id, &node_id)| (chan_id, node_id))
+                       );
+                       let mut shutdown_results = Vec::new();
+                       {
+                               let per_peer_state = self.per_peer_state.read().unwrap();
+                               for (channel_id, counterparty_node_id) in channels_to_remove {
+                                       per_peer_state.get(&counterparty_node_id)
+                                               .map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
+                                               .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
+                                               .map(|mut chan| {
+                                                       update_maps_on_chan_removal!(self, &chan.context());
+                                                       self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
+                                                       shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                               });
                                }
                        }
-                       if output_index.is_none() {
-                               return Err(APIError::APIMisuseError {
-                                       err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
-                               });
+                       for shutdown_result in shutdown_results.drain(..) {
+                               self.finish_close_channel(shutdown_result);
                        }
-                       Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
-               })
+               }
+               result
        }
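The closure handed to funding_transaction_generated_intern locates each channel's funding output by script and value and rejects both zero and multiple matches. A simplified, self-contained version of that scan, using byte vectors as stand-ins for Script:

// Simplified version of the output scan in the closure above.
fn find_output_index(outputs: &[(Vec<u8>, u64)], expected_spk: &[u8], value_sats: u64)
    -> Result<u16, &'static str>
{
    let mut output_index = None;
    for (idx, (spk, value)) in outputs.iter().enumerate() {
        if spk.as_slice() == expected_spk && *value == value_sats {
            if output_index.is_some() {
                return Err("Multiple outputs matched the expected script and value");
            }
            output_index = Some(idx as u16);
        }
    }
    output_index.ok_or("No output matched the script_pubkey and value")
}

fn main() {
    let outputs = vec![(vec![0xAA], 10_000_000u64), (vec![0xBB], 10_000_000)];
    assert_eq!(find_output_index(&outputs, &[0xAA], 10_000_000), Ok(0));
    assert!(find_output_index(&outputs, &[0xCC], 10_000_000).is_err());
}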
 
        /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
@@ -4849,7 +5009,7 @@ where
                        }
 
                        for shutdown_res in shutdown_channels {
-                               self.finish_force_close_channel(shutdown_res);
+                               self.finish_close_channel(shutdown_res);
                        }
 
                        self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
@@ -6075,13 +6235,15 @@ where
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
                if let Some(shutdown_res) = finish_shutdown {
-                       self.finish_force_close_channel(shutdown_res);
+                       self.finish_close_channel(shutdown_res);
                }
 
                Ok(())
        }
 
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
+               let mut shutdown_result = None;
+               let unbroadcasted_batch_funding_txid;
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -6094,6 +6256,7 @@ where
                        match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
                                                let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
                                                if let Some(msg) = closing_signed {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
@@ -6130,6 +6293,11 @@ where
                                });
                        }
                        self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
+                       shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
+               }
+               mem::drop(per_peer_state);
+               if let Some(shutdown_result) = shutdown_result {
+                       self.finish_close_channel(shutdown_result);
                }
                Ok(())
        }
@@ -6588,7 +6756,7 @@ where
                                        if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersistNoEvents);
                                        } else {
-                                               log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
+                                               log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
                                                try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
                                        }
                                } else {
@@ -6734,7 +6902,7 @@ where
                }
 
                for failure in failed_channels.drain(..) {
-                       self.finish_force_close_channel(failure);
+                       self.finish_close_channel(failure);
                }
 
                has_pending_monitor_events
@@ -6804,6 +6972,8 @@ where
        fn maybe_generate_initial_closing_signed(&self) -> bool {
                let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
                let mut has_update = false;
+               let mut shutdown_result = None;
+               let mut unbroadcasted_batch_funding_txid = None;
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -6814,6 +6984,7 @@ where
                                peer_state.channel_by_id.retain(|channel_id, phase| {
                                        match phase {
                                                ChannelPhase::Funded(chan) => {
+                                                       unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
                                                        match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
                                                                Ok((msg_opt, tx_opt)) => {
                                                                        if let Some(msg) = msg_opt {
@@ -6836,6 +7007,7 @@ where
                                                                                log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
+                                                                               shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
                                                                                false
                                                                        } else { true }
                                                                },
@@ -6857,6 +7029,10 @@ where
                        let _ = handle_error!(self, err, counterparty_node_id);
                }
 
+               if let Some(shutdown_result) = shutdown_result {
+                       self.finish_close_channel(shutdown_result);
+               }
+
                has_update
        }
 
@@ -6882,7 +7058,7 @@ where
                                                counterparty_node_id, funding_txo, update
                                        });
                        }
-                       self.finish_force_close_channel(failure);
+                       self.finish_close_channel(failure);
                }
        }
 
@@ -7822,7 +7998,6 @@ where
        fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
                let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
                        self, || NotifyOption::SkipPersistHandleEvents);
-
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
@@ -7835,24 +8010,24 @@ where
                                peer_state.channel_by_id.retain(|_, phase| {
                                        let context = match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                                       // We only retain funded channels that are not shutdown.
-                                                       if !chan.is_shutdown() {
+                                                       if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
+                                                               // We only retain funded channels that are not shutdown.
                                                                return true;
                                                        }
-                                                       &chan.context
+                                                       &mut chan.context
                                                },
                                                // Unfunded channels will always be removed.
                                                ChannelPhase::UnfundedOutboundV1(chan) => {
-                                                       &chan.context
+                                                       &mut chan.context
                                                },
                                                ChannelPhase::UnfundedInboundV1(chan) => {
-                                                       &chan.context
+                                                       &mut chan.context
                                                },
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
                                        self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
+                                       failed_channels.push(context.force_shutdown(false));
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
@@ -7911,7 +8086,7 @@ where
                mem::drop(per_peer_state);
 
                for failure in failed_channels.drain(..) {
-                       self.finish_force_close_channel(failure);
+                       self.finish_close_channel(failure);
                }
        }
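The disconnect path above now collects a shutdown result for each removed channel while the per-peer state is still borrowed, and only calls finish_close_channel once the locks are dropped. A self-contained miniature of that retain-and-collect shape (stand-in types, not the LDK ones):

    use std::collections::HashMap;

    fn drop_dead_entries() {
        let mut channels: HashMap<u64, bool /* keep? */> =
            [(1u64, true), (2, false)].into_iter().collect();
        let mut failed = Vec::new();
        channels.retain(|id, keep| {
            if *keep { return true; }
            failed.push(*id); // analogous to context.force_shutdown(false)
            false             // drop the entry from the map
        });
        // Borrows released; now act on what was collected, analogous to
        // finish_close_channel(failure) above.
        for failure in failed.drain(..) { let _ = failure; }
    }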
 
@@ -8656,7 +8831,7 @@ where
                                }
 
                                number_of_funded_channels += peer_state.channel_by_id.iter().filter(
-                                       |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false }
+                                       |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
                                ).count();
                        }
 
@@ -8667,7 +8842,7 @@ where
                                let peer_state = &mut *peer_state_lock;
                                for channel in peer_state.channel_by_id.iter().filter_map(
                                        |(_, phase)| if let ChannelPhase::Funded(channel) = phase {
-                                               if channel.context.is_funding_initiated() { Some(channel) } else { None }
+                                               if channel.context.is_funding_broadcast() { Some(channel) } else { None }
                                        } else { None }
                                ) {
                                        channel.write(writer)?;
@@ -9091,7 +9266,10 @@ where
                                                log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
-                                       let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
+                                       let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
+                                       if batch_funding_txid.is_some() {
+                                               return Err(DecodeError::InvalidValue);
+                                       }
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id, funding_txo, update
@@ -9131,7 +9309,7 @@ where
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
-                                       if channel.context.is_funding_initiated() {
+                                       if channel.context.is_funding_broadcast() {
                                                id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
                                        }
                                        match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
@@ -9845,6 +10023,8 @@ where
                        event_persist_notifier: Notifier::new(),
                        needs_persist_flag: AtomicBool::new(false),
 
+                       funding_batch_states: Mutex::new(BTreeMap::new()),
+
                        entropy_source: args.entropy_source,
                        node_signer: args.node_signer,
                        signer_provider: args.signer_provider,
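The new funding_batch_states field tracks, per shared funding transaction, whether every channel in the batch has completed its initial monitor persistence; the funding transaction may be broadcast only once all have. A minimal sketch of that bookkeeping, with stand-in types rather than the actual LDK definitions (the real value type is an assumption here):

    use std::collections::BTreeMap;
    use std::sync::Mutex;

    type TxidSketch = [u8; 32];
    type ChannelIdSketch = [u8; 32];

    struct BatchFundingStates {
        // Keyed by the batch funding txid; per channel, whether its monitor persisted.
        states: Mutex<BTreeMap<TxidSketch, Vec<(ChannelIdSketch, bool)>>>,
    }

    impl BatchFundingStates {
        // Mark one channel's monitor as persisted; returns true once the whole
        // batch is ready, i.e. the funding transaction may be broadcast.
        fn mark_persisted(&self, txid: &TxidSketch, chan: &ChannelIdSketch) -> bool {
            let mut states = self.states.lock().unwrap();
            if let Some(batch) = states.get_mut(txid) {
                for (id, done) in batch.iter_mut() {
                    if id == chan { *done = true; }
                }
                return batch.iter().all(|(_, done)| *done);
            }
            false
        }
    }

This also suggests why deserialization above rejects a channel whose force_shutdown reports a batch funding txid: a manager reloaded mid-batch cannot reconstruct this in-memory state, so the encoding is treated as invalid.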
index 0e19ceb65832e47db0e3fb7d5165361acf7282b1..207c0692031a7c89b9f2f6c75da9fc71997abcc3 100644 (file)
@@ -1473,12 +1473,12 @@ pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: Clo
        let events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), events_count, "{:?}", events);
        let mut issues_discard_funding = false;
-       for (idx, event) in events.into_iter().enumerate() {
+       for event in events {
                match event {
-                       Event::ChannelClosed { ref reason, counterparty_node_id, 
+                       Event::ChannelClosed { ref reason, counterparty_node_id,
                                channel_capacity_sats, .. } => {
                                assert_eq!(*reason, expected_reason);
-                               assert_eq!(counterparty_node_id.unwrap(), expected_counterparty_node_ids[idx]);
+                               assert!(expected_counterparty_node_ids.iter().any(|id| id == &counterparty_node_id.unwrap()));
                                assert_eq!(channel_capacity_sats.unwrap(), expected_channel_capacity);
                        },
                        Event::DiscardFunding { .. } => {
@@ -1499,7 +1499,7 @@ macro_rules! check_closed_event {
                check_closed_event!($node, $events, $reason, false, $counterparty_node_ids, $channel_capacity);
        };
        ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => {
-               $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason, 
+               $crate::ln::functional_test_utils::check_closed_event(&$node, $events, $reason,
                        $is_check_discard_funding, &$counterparty_node_ids, $channel_capacity);
        }
 }
@@ -3266,3 +3266,76 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                }
        }
 }
+
+/// Initiates channel opening and creates a single batch funding transaction.
+/// This goes through the open_channel / accept_channel flow and returns the batch funding
+/// transaction together with the corresponding funding_created messages.
+pub fn create_batch_channel_funding<'a, 'b, 'c>(
+       funding_node: &Node<'a, 'b, 'c>,
+       params: &[(&Node<'a, 'b, 'c>, u64, u64, u128, Option<UserConfig>)],
+) -> (Transaction, Vec<msgs::FundingCreated>) {
+       let mut tx_outs = Vec::new();
+       let mut temp_chan_ids = Vec::new();
+       let mut funding_created_msgs = Vec::new();
+
+       for (other_node, channel_value_satoshis, push_msat, user_channel_id, override_config) in params {
+               // Initialize channel opening.
+               let temp_chan_id = funding_node.node.create_channel(
+                       other_node.node.get_our_node_id(), *channel_value_satoshis, *push_msat, *user_channel_id,
+                       *override_config,
+               ).unwrap();
+               let open_channel_msg = get_event_msg!(funding_node, MessageSendEvent::SendOpenChannel, other_node.node.get_our_node_id());
+               other_node.node.handle_open_channel(&funding_node.node.get_our_node_id(), &open_channel_msg);
+               let accept_channel_msg = get_event_msg!(other_node, MessageSendEvent::SendAcceptChannel, funding_node.node.get_our_node_id());
+               funding_node.node.handle_accept_channel(&other_node.node.get_our_node_id(), &accept_channel_msg);
+
+               // Create the corresponding funding output.
+               let events = funding_node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::FundingGenerationReady {
+                               ref temporary_channel_id,
+                               ref counterparty_node_id,
+                               channel_value_satoshis: ref event_channel_value_satoshis,
+                               ref output_script,
+                               user_channel_id: ref event_user_channel_id
+                       } => {
+                               assert_eq!(temporary_channel_id, &temp_chan_id);
+                               assert_eq!(counterparty_node_id, &other_node.node.get_our_node_id());
+                               assert_eq!(channel_value_satoshis, event_channel_value_satoshis);
+                               assert_eq!(user_channel_id, event_user_channel_id);
+                               tx_outs.push(TxOut {
+                                       value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+                               });
+                       },
+                       _ => panic!("Unexpected event"),
+               };
+               temp_chan_ids.push((temp_chan_id, other_node.node.get_our_node_id()));
+       }
+
+       // Compose the batch funding transaction and give it to the ChannelManager.
+       let tx = Transaction {
+               version: 2,
+               lock_time: PackedLockTime::ZERO,
+               input: Vec::new(),
+               output: tx_outs,
+       };
+       assert!(funding_node.node.batch_funding_transaction_generated(
+               temp_chan_ids.iter().map(|(a, b)| (a, b)).collect::<Vec<_>>().as_slice(),
+               tx.clone(),
+       ).is_ok());
+       check_added_monitors!(funding_node, 0);
+       let events = funding_node.node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), params.len());
+       for (other_node, ..) in params {
+               let funding_created = events
+                       .iter()
+                       .find_map(|event| match event {
+                               MessageSendEvent::SendFundingCreated { node_id, msg } if node_id == &other_node.node.get_our_node_id() => Some(msg.clone()),
+                               _ => None,
+                       })
+                       .unwrap();
+               funding_created_msgs.push(funding_created);
+       }
+       return (tx, funding_created_msgs);
+}
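As exercised by test_batch_channel_open later in this patch, the helper is driven like so (condensed usage sketch):

    let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
        // (counterparty, channel_value_satoshis, push_msat, user_channel_id, config)
        (&nodes[1], 100_000, 0, 42, None),
        (&nodes[2], 200_000, 0, 43, None),
    ]);
    // One funding output per requested channel, and one funding_created message
    // per counterparty, in parameter order.
    assert_eq!(tx.output.len(), 2);
    assert_eq!(funding_created_msgs.len(), 2);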
index 1066362a4c4c101791a0c12a051e36c7986c2741..19ba81235f2df3bb8bad508ab39787aebb95b053 100644 (file)
@@ -15,7 +15,7 @@ use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
-use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
 use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
@@ -3721,7 +3721,7 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false
+       check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
@@ -9038,7 +9038,7 @@ fn test_duplicate_chan_id() {
                match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
                        ChannelPhase::UnfundedOutboundV1(chan) => {
                                let logger = test_utils::TestLogger::new();
-                               chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+                               chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
                        },
                        _ => panic!("Unexpected ChannelPhase variant"),
                }
@@ -9900,9 +9900,46 @@ fn test_non_final_funding_tx() {
                },
                _ => panic!()
        }
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::ChannelClosed { channel_id, .. } => {
+                       assert_eq!(channel_id, temp_channel_id);
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
+#[test]
+fn test_non_final_funding_tx_within_headroom() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let best_height = nodes[0].node.best_block.read().unwrap().height();
+
+       let chan_id = *nodes[0].network_chan_count.borrow();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
+       assert_eq!(events.len(), 1);
+       let mut tx = match events[0] {
+               Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
+                               // Timelock the transaction to within a +1 headroom of the best block.
+                       Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 1), input: vec![input], output: vec![TxOut {
+                               value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+                       }]}
+               },
+               _ => panic!("Unexpected event"),
+       };
 
-       // However, transaction should be accepted if it's in a +1 headroom from best block.
-       tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
+       // The transaction should be accepted since its locktime is within a +1 headroom of the best block.
        assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
        get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 }
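As the two tests above exercise it, the locktime rule reduces to one comparison: a funding transaction with an absolute locktime is accepted only when that locktime is at most one block above the current best height. A plain-Rust restatement of the boundary (a sketch of the behavior; the actual validation lives inside funding_transaction_generated):

    fn within_headroom(lock_time: u32, best_height: u32) -> bool {
        // Accepted up to best_height + 1; later locktimes are rejected as
        // non-final funding transactions.
        lock_time <= best_height + 1
    }

    #[test]
    fn headroom_boundary() {
        assert!(within_headroom(101, 100));  // +1 headroom: accepted
        assert!(!within_headroom(102, 100)); // beyond the headroom: rejected
    }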
@@ -10361,3 +10398,230 @@ fn test_multi_post_event_actions() {
        do_test_multi_post_event_actions(true);
        do_test_multi_post_event_actions(false);
 }
+
+#[test]
+fn test_batch_channel_open() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+
+       // Go through the funding_created and funding_signed flow with node 2.
+       nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
+       check_added_monitors(&nodes[2], 1);
+       expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before the persistence of all monitors
+       // has completed.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+
+       // Complete the persistence of the monitor.
+       nodes[0].chain_monitor.complete_sole_pending_chan_update(
+               &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+       );
+       let events = nodes[0].node.get_and_clear_pending_events();
+
+       // The transaction should only have been broadcast now.
+       let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+       assert_eq!(broadcasted_txs.len(), 1);
+       assert_eq!(broadcasted_txs[0], tx);
+
+       assert_eq!(events.len(), 2);
+       assert!(events.iter().any(|e| matches!(
+               *e,
+               crate::events::Event::ChannelPending {
+                       ref counterparty_node_id,
+                       ..
+               } if counterparty_node_id == &nodes[1].node.get_our_node_id(),
+       )));
+       assert!(events.iter().any(|e| matches!(
+               *e,
+               crate::events::Event::ChannelPending {
+                       ref counterparty_node_id,
+                       ..
+               } if counterparty_node_id == &nodes[2].node.get_our_node_id(),
+       )));
+}
+
+#[test]
+fn test_disconnect_in_funding_batch() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // The remaining peer in the batch disconnects.
+       nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+
+       // The channels in the batch will close immediately.
+       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 4);
+       assert!(events.iter().any(|e| matches!(
+               e,
+               Event::ChannelClosed {
+                       channel_id,
+                       ..
+               } if channel_id == &channel_id_1
+       )));
+       assert!(events.iter().any(|e| matches!(
+               e,
+               Event::ChannelClosed {
+                       channel_id,
+                       ..
+               } if channel_id == &channel_id_2
+       )));
+       assert_eq!(events.iter().filter(|e| matches!(
+               e,
+               Event::DiscardFunding { .. },
+       )).count(), 2);
+
+       // The monitor should now be marked as closed.
+       check_added_monitors(&nodes[0], 1);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
+       // The funding transaction should not have been broadcast, and therefore we don't need
+       // to broadcast a force-close transaction for the closed monitor.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // Ensure the channels don't exist anymore.
+       assert!(nodes[0].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_batch_funding_close_after_funding_signed() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // Go through the funding_created and funding_signed flow with node 2.
+       nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
+       check_added_monitors(&nodes[2], 1);
+       expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // Force-close the channel for which we've completed the initial monitor.
+       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+       check_added_monitors(&nodes[0], 2);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+               let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
+               assert_eq!(monitor_updates_2.len(), 1);
+               assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { .. } => (),
+               _ => panic!("Unexpected message."),
+       }
+
+       // We broadcast the commitment transaction as part of the force-close.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert!(broadcasted_txs[0].txid() != tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
+
+       // All channels in the batch should close immediately.
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 4);
+       assert!(events.iter().any(|e| matches!(
+               e,
+               Event::ChannelClosed {
+                       channel_id,
+                       ..
+               } if channel_id == &channel_id_1
+       )));
+       assert!(events.iter().any(|e| matches!(
+               e,
+               Event::ChannelClosed {
+                       channel_id,
+                       ..
+               } if channel_id == &channel_id_2
+       )));
+       assert_eq!(events.iter().filter(|e| matches!(
+               e,
+               Event::DiscardFunding { .. },
+       )).count(), 2);
+
+       // Ensure the channels don't exist anymore.
+       assert!(nodes[0].node.list_channels().is_empty());
+}
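Both batch tests assert that the sole monitor update carries CLOSED_CHANNEL_UPDATE_ID, the sentinel update id marking a monitor whose channel has been closed, and both repeat the same matches!-with-guard idiom for event checks. That idiom, reduced to a self-contained sketch with a stand-in event type (not the LDK Event enum):

    #[allow(dead_code)]
    enum EventSketch {
        ChannelClosed { channel_id: [u8; 32] },
        DiscardFunding { channel_id: [u8; 32] },
    }

    // "Some event closes channel `id`", as asserted repeatedly above.
    fn any_closed(events: &[EventSketch], id: &[u8; 32]) -> bool {
        events.iter().any(|e| matches!(
            e, EventSketch::ChannelClosed { channel_id } if channel_id == id
        ))
    }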
index 892ddce2a7dc86bde458f7d9153d7245c695b3bc..f0cc872db0ad6731706cc380f59fd0c78be122f9 100644 (file)
@@ -1886,23 +1886,35 @@ fn test_yield_anchors_events() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let chan_id = create_announced_chan_between_nodes_with_value(
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(
                &nodes, 0, 1, 1_000_000, 500_000_000
-       ).2;
-       route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+       );
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 2_000_000);
 
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
-       check_closed_broadcast!(&nodes[0], true);
-       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
+
+       connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+       {
+               let txn = nodes[1].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               check_spends!(txn[0], funding_tx);
+       }
 
        get_monitor!(nodes[0], chan_id).provide_payment_preimage(
-               &payment_hash, &payment_preimage, &node_cfgs[0].tx_broadcaster,
+               &payment_hash_2, &payment_preimage_2, &node_cfgs[0].tx_broadcaster,
                &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger
        );
+       get_monitor!(nodes[1], chan_id).provide_payment_preimage(
+               &payment_hash_1, &payment_preimage_1, &node_cfgs[0].tx_broadcaster,
+               &LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger
+       );
 
        let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
        assert_eq!(holder_events.len(), 1);
@@ -1923,27 +1935,50 @@ fn test_yield_anchors_events() {
                        assert_eq!(txn.len(), 2);
                        let anchor_tx = txn.pop().unwrap();
                        let commitment_tx = txn.pop().unwrap();
+                       check_spends!(commitment_tx, funding_tx);
                        check_spends!(anchor_tx, coinbase_tx, commitment_tx);
                        (commitment_tx, anchor_tx)
                },
                _ => panic!("Unexpected event"),
        };
 
+       assert_eq!(commitment_tx.output[2].value, 1_000); // HTLC A -> B
+       assert_eq!(commitment_tx.output[3].value, 2_000); // HTLC B -> A
+
        mine_transactions(&nodes[0], &[&commitment_tx, &anchor_tx]);
        check_added_monitors!(nodes[0], 1);
+       mine_transactions(&nodes[1], &[&commitment_tx, &anchor_tx]);
+       check_added_monitors!(nodes[1], 1);
+
+       {
+               let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+
+               let htlc_preimage_tx = txn.pop().unwrap();
+               assert_eq!(htlc_preimage_tx.input.len(), 1);
+               assert_eq!(htlc_preimage_tx.input[0].previous_output.vout, 3);
+               check_spends!(htlc_preimage_tx, commitment_tx);
+
+               let htlc_timeout_tx = txn.pop().unwrap();
+               assert_eq!(htlc_timeout_tx.input.len(), 1);
+               assert_eq!(htlc_timeout_tx.input[0].previous_output.vout, 2);
+               check_spends!(htlc_timeout_tx, commitment_tx);
+
+               if let Some(commitment_tx) = txn.pop() {
+                       check_spends!(commitment_tx, funding_tx);
+               }
+       }
 
        let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
        // Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the
        // best block is updated before the confirmed transactions are notified.
-       match *nodes[0].connect_style.borrow() {
-               ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => {
-                       assert_eq!(holder_events.len(), 3);
-                       if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = holder_events.remove(0) {}
-                       else { panic!("unexpected event"); }
-
-               },
-               _ => assert_eq!(holder_events.len(), 2),
-       };
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               assert_eq!(holder_events.len(), 3);
+               if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = holder_events.remove(0) {}
+               else { panic!("unexpected event"); }
+       } else {
+               assert_eq!(holder_events.len(), 2);
+       }
        let mut htlc_txs = Vec::with_capacity(2);
        for event in holder_events {
                match event {
@@ -1977,6 +2012,9 @@ fn test_yield_anchors_events() {
 
        // Clear the remaining events as they're not relevant to what we're testing.
        nodes[0].node.get_and_clear_pending_events();
+       nodes[1].node.get_and_clear_pending_events();
+       nodes[0].node.get_and_clear_pending_msg_events();
+       nodes[1].node.get_and_clear_pending_msg_events();
 }
 
 #[test]
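The hunk above swaps a match over individual ConnectStyle variants for the updates_best_block_first() predicate, centralizing the "best block updated before transactions are notified" property in one place. The shape of that refactor, with an abbreviated stand-in enum (not the full LDK ConnectStyle):

    #[derive(Clone, Copy)]
    enum ConnectStyleSketch {
        BestBlockFirst,
        BestBlockFirstSkippingBlocks,
        TransactionsFirst,
    }

    impl ConnectStyleSketch {
        // True when the best block is updated before the confirmed transactions
        // are notified, which is what yields the extra ChannelClose bump event.
        fn updates_best_block_first(&self) -> bool {
            matches!(self, Self::BestBlockFirst | Self::BestBlockFirstSkippingBlocks)
        }
    }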
index cdf5627a7aac6013371cd2b55e3cf14224a8423a..2522f99fbe85dbecd751034215207cbda23ad5ab 100644 (file)
@@ -875,7 +875,7 @@ impl OutboundPayments {
                        }
                }
 
-               let route = router.find_route_with_id(
+               let mut route = router.find_route_with_id(
                        &node_signer.get_node_id(Recipient::Node).unwrap(), &route_params,
                        Some(&first_hops.iter().collect::<Vec<_>>()), inflight_htlcs(),
                        payment_hash, payment_id,
@@ -885,6 +885,14 @@ impl OutboundPayments {
                        RetryableSendFailure::RouteNotFound
                })?;
 
+               if let Some(route_route_params) = route.route_params.as_mut() {
+                       if route_route_params.final_value_msat != route_params.final_value_msat {
+                               debug_assert!(false,
+                                       "Routers are expected to return a route which includes the requested final_value_msat");
+                               route_route_params.final_value_msat = route_params.final_value_msat;
+                       }
+               }
+
                let onion_session_privs = self.add_new_pending_payment(payment_hash,
                        recipient_onion.clone(), payment_id, keysend_preimage, &route, Some(retry_strategy),
                        Some(route_params.payment_params.clone()), entropy_source, best_block_height)
@@ -926,7 +934,7 @@ impl OutboundPayments {
                        }
                }
 
-               let route = match router.find_route_with_id(
+               let mut route = match router.find_route_with_id(
                        &node_signer.get_node_id(Recipient::Node).unwrap(), &route_params,
                        Some(&first_hops.iter().collect::<Vec<_>>()), inflight_htlcs(),
                        payment_hash, payment_id,
@@ -938,6 +946,15 @@ impl OutboundPayments {
                                return
                        }
                };
+
+               if let Some(route_route_params) = route.route_params.as_mut() {
+                       if route_route_params.final_value_msat != route_params.final_value_msat {
+                               debug_assert!(false,
+                                       "Routers are expected to return a route which includes the requested final_value_msat");
+                               route_route_params.final_value_msat = route_params.final_value_msat;
+                       }
+               }
+
                for path in route.paths.iter() {
                        if path.hops.len() == 0 {
                                log_error!(logger, "Unusable path in route (path.hops.len() must be at least 1");
@@ -1337,12 +1354,14 @@ impl OutboundPayments {
                }
                let mut has_ok = false;
                let mut has_err = false;
-               let mut pending_amt_unsent = 0;
+               let mut has_unsent = false;
                let mut total_ok_fees_msat = 0;
+               let mut total_ok_amt_sent_msat = 0;
                for (res, path) in results.iter().zip(route.paths.iter()) {
                        if res.is_ok() {
                                has_ok = true;
                                total_ok_fees_msat += path.fee_msat();
+                               total_ok_amt_sent_msat += path.final_value_msat();
                        }
                        if res.is_err() { has_err = true; }
                        if let &Err(APIError::MonitorUpdateInProgress) = res {
@@ -1351,23 +1370,27 @@ impl OutboundPayments {
                                has_err = true;
                                has_ok = true;
                                total_ok_fees_msat += path.fee_msat();
+                               total_ok_amt_sent_msat += path.final_value_msat();
                        } else if res.is_err() {
-                               pending_amt_unsent += path.final_value_msat();
+                               has_unsent = true;
                        }
                }
                if has_err && has_ok {
                        Err(PaymentSendFailure::PartialFailure {
                                results,
                                payment_id,
-                               failed_paths_retry: if pending_amt_unsent != 0 {
+                               failed_paths_retry: if has_unsent {
                                        if let Some(route_params) = &route.route_params {
                                                let mut route_params = route_params.clone();
                                                // We calculate the leftover fee budget we're allowed to spend by
                                                // subtracting the used fee from the total fee budget.
                                                route_params.max_total_routing_fee_msat = route_params
                                                        .max_total_routing_fee_msat.map(|m| m.saturating_sub(total_ok_fees_msat));
-                                               route_params.final_value_msat = pending_amt_unsent;
 
+                                               // We calculate the remaining target amount by subtracting the succeeded
+                                               // path values.
+                                               route_params.final_value_msat = route_params.final_value_msat
+                                                       .saturating_sub(total_ok_amt_sent_msat);
                                                Some(route_params)
                                        } else { None }
                                } else { None },
@@ -2084,11 +2107,6 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let payment_id = PaymentId([0; 32]);
 
-               assert!(
-                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
-               );
-               assert!(outbound_payments.has_pending_payments());
-
                let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .amount_msats(1000)
                        .build().unwrap()
@@ -2099,6 +2117,12 @@ mod tests {
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
 
+               assert!(outbound_payments.add_new_awaiting_invoice(
+                               payment_id, Retry::Attempts(0), Some(invoice.amount_msats() / 100 + 50_000))
+                       .is_ok()
+               );
+               assert!(outbound_payments.has_pending_payments());
+
                router.expect_find_route(
                        RouteParameters::from_payment_params_and_value(
                                PaymentParameters::from_bolt12_invoice(&invoice),
@@ -2139,11 +2163,6 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let payment_id = PaymentId([0; 32]);
 
-               assert!(
-                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
-               );
-               assert!(outbound_payments.has_pending_payments());
-
                let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .amount_msats(1000)
                        .build().unwrap()
@@ -2154,6 +2173,12 @@ mod tests {
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
 
+               assert!(outbound_payments.add_new_awaiting_invoice(
+                               payment_id, Retry::Attempts(0), Some(invoice.amount_msats() / 100 + 50_000))
+                       .is_ok()
+               );
+               assert!(outbound_payments.has_pending_payments());
+
                let route_params = RouteParameters::from_payment_params_and_value(
                        PaymentParameters::from_bolt12_invoice(&invoice),
                        invoice.amount_msats(),
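The final_value_msat reconciliation added in this file appears verbatim in both send paths; its shared shape could be lifted into a helper along these lines (an illustrative sketch, not an LDK API):

    // Clamp the router-returned amount back to the requested one, debug-asserting
    // on the mismatch, exactly as the two inlined checks above do.
    fn reconcile_final_value_msat(route_final_value_msat: &mut u64, requested_msat: u64) {
        if *route_final_value_msat != requested_msat {
            debug_assert!(false,
                "Routers are expected to return a route which includes the requested final_value_msat");
            *route_final_value_msat = requested_msat;
        }
    }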
index 21c4881ab1a08295cf7989ea99d3a47561f9656f..72176760243bf25cae3dd3adfcb656a4be6417cd 100644 (file)
@@ -114,19 +114,9 @@ fn mpp_retry() {
 
        // Add the HTLC along the first hop.
        let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
-       let (update_add, commitment_signed) = match fail_path_msgs_1 {
-               MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-                       assert_eq!(update_add_htlcs.len(), 1);
-                       assert!(update_fail_htlcs.is_empty());
-                       assert!(update_fulfill_htlcs.is_empty());
-                       assert!(update_fail_malformed_htlcs.is_empty());
-                       assert!(update_fee.is_none());
-                       (update_add_htlcs[0].clone(), commitment_signed.clone())
-               },
-               _ => panic!("Unexpected event"),
-       };
-       nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
-       commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);
+       let send_event = SendEvent::from_event(fail_path_msgs_1);
+       nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false);
 
        // Attempt to forward the payment and complete the 2nd path's failure.
        expect_pending_htlcs_forwardable!(&nodes[2]);
@@ -225,25 +215,9 @@ fn mpp_retry_overpay() {
 
        // Add the HTLC along the first hop.
        let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
-       let (update_add, commitment_signed) = match fail_path_msgs_1 {
-               MessageSendEvent::UpdateHTLCs {
-                       node_id: _,
-                       updates: msgs::CommitmentUpdate {
-                                       ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs,
-                                       ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed
-                       }
-               } => {
-                       assert_eq!(update_add_htlcs.len(), 1);
-                       assert!(update_fail_htlcs.is_empty());
-                       assert!(update_fulfill_htlcs.is_empty());
-                       assert!(update_fail_malformed_htlcs.is_empty());
-                       assert!(update_fee.is_none());
-                       (update_add_htlcs[0].clone(), commitment_signed.clone())
-               },
-               _ => panic!("Unexpected event"),
-       };
-       nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
-       commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);
+       let send_event = SendEvent::from_event(fail_path_msgs_1);
+       nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false);
 
        // Attempt to forward the payment and complete the 2nd path's failure.
        expect_pending_htlcs_forwardable!(&nodes[2]);
@@ -279,6 +253,7 @@ fn mpp_retry_overpay() {
 
        route.paths.remove(0);
        route_params.final_value_msat -= first_path_value;
+       route.route_params.as_mut().map(|p| p.final_value_msat -= first_path_value);
        route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
 
        // Check the remaining max total routing fee for the second attempt accounts only for 1_000 msat
@@ -2421,9 +2396,11 @@ fn auto_retry_partial_failure() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
-       // Configure the initial send, retry1 and retry2's paths.
+       // Configure the initial send path
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       route_params.max_total_routing_fee_msat = None;
+
        let send_route = Route {
                paths: vec![
                        Path { hops: vec![RouteHop {
@@ -2447,6 +2424,14 @@ fn auto_retry_partial_failure() {
                ],
                route_params: Some(route_params.clone()),
        };
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
+
+       // Configure the retry1 paths
+       let mut payment_params = route_params.payment_params.clone();
+       payment_params.previously_failed_channels.push(chan_2_id);
+       let mut retry_1_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2);
+       retry_1_params.max_total_routing_fee_msat = None;
+
        let retry_1_route = Route {
                paths: vec![
                        Path { hops: vec![RouteHop {
@@ -2468,8 +2453,16 @@ fn auto_retry_partial_failure() {
                                maybe_announced_channel: true,
                        }], blinded_tail: None },
                ],
-               route_params: Some(route_params.clone()),
+               route_params: Some(retry_1_params.clone()),
        };
+       nodes[0].router.expect_find_route(retry_1_params.clone(), Ok(retry_1_route));
+
+       // Configure the retry2 path
+       let mut payment_params = retry_1_params.payment_params.clone();
+       payment_params.previously_failed_channels.push(chan_3_id);
+       let mut retry_2_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4);
+       retry_2_params.max_total_routing_fee_msat = None;
+
        let retry_2_route = Route {
                paths: vec![
                        Path { hops: vec![RouteHop {
@@ -2482,19 +2475,9 @@ fn auto_retry_partial_failure() {
                                maybe_announced_channel: true,
                        }], blinded_tail: None },
                ],
-               route_params: Some(route_params.clone()),
+               route_params: Some(retry_2_params.clone()),
        };
-       nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
-       let mut payment_params = route_params.payment_params.clone();
-       payment_params.previously_failed_channels.push(chan_2_id);
-       nodes[0].router.expect_find_route(
-               RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2),
-               Ok(retry_1_route));
-       let mut payment_params = route_params.payment_params.clone();
-       payment_params.previously_failed_channels.push(chan_3_id);
-       nodes[0].router.expect_find_route(
-               RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4),
-               Ok(retry_2_route));
+       nodes[0].router.expect_find_route(retry_2_params, Ok(retry_2_route));
 
        // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -2718,8 +2701,9 @@ fn retry_multi_path_single_failed_payment() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters::from_payment_params_and_value(
+       let mut route_params = RouteParameters::from_payment_params_and_value(
                payment_params.clone(), amt_msat);
+       route_params.max_total_routing_fee_msat = None;
 
        let chans = nodes[0].node.list_usable_channels();
        let mut route = Route {
@@ -2751,11 +2735,11 @@ fn retry_multi_path_single_failed_payment() {
        route.paths[1].hops[0].fee_msat = 50_000_000;
        let mut pay_params = route.route_params.clone().unwrap().payment_params;
        pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
-       nodes[0].router.expect_find_route(
-               // Note that the second request here requests the amount we originally failed to send,
-               // not the amount remaining on the full payment, which should be changed.
-               RouteParameters::from_payment_params_and_value(pay_params, 100_000_001),
-               Ok(route.clone()));
+
+       let mut retry_params = RouteParameters::from_payment_params_and_value(pay_params, 100_000_000);
+       retry_params.max_total_routing_fee_msat = None;
+       route.route_params.as_mut().unwrap().final_value_msat = 100_000_000;
+       nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
 
        {
                let scorer = chanmon_cfgs[0].scorer.read().unwrap();
@@ -2898,7 +2882,8 @@ fn no_extra_retries_on_back_to_back_fail() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       route_params.max_total_routing_fee_msat = None;
 
        let mut route = Route {
                paths: vec![
@@ -2937,19 +2922,18 @@ fn no_extra_retries_on_back_to_back_fail() {
                                maybe_announced_channel: true,
                        }], blinded_tail: None }
                ],
-               route_params: Some(RouteParameters::from_payment_params_and_value(
-                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
-                       100_000_000)),
+               route_params: Some(route_params.clone()),
        };
+       route.route_params.as_mut().unwrap().max_total_routing_fee_msat = None;
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        let mut second_payment_params = route_params.payment_params.clone();
        second_payment_params.previously_failed_channels = vec![chan_2_scid, chan_2_scid];
        // On retry, we'll only return one path
        route.paths.remove(1);
        route.paths[0].hops[1].fee_msat = amt_msat;
-       nodes[0].router.expect_find_route(
-               RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat),
-               Ok(route.clone()));
+       let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat);
+       retry_params.max_total_routing_fee_msat = None;
+       nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -3102,7 +3086,8 @@ fn test_simple_partial_retry() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       route_params.max_total_routing_fee_msat = None;
 
        let mut route = Route {
                paths: vec![
@@ -3141,18 +3126,19 @@ fn test_simple_partial_retry() {
                                maybe_announced_channel: true,
                        }], blinded_tail: None }
                ],
-               route_params: Some(RouteParameters::from_payment_params_and_value(
-                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
-                       100_000_000)),
+               route_params: Some(route_params.clone()),
        };
+
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+
        let mut second_payment_params = route_params.payment_params.clone();
        second_payment_params.previously_failed_channels = vec![chan_2_scid];
        // On retry, we'll only be asked for one path (or 100k sats)
        route.paths.remove(0);
-       nodes[0].router.expect_find_route(
-               RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2),
-               Ok(route.clone()));
+       let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2);
+       retry_params.max_total_routing_fee_msat = None;
+       route.route_params.as_mut().unwrap().final_value_msat = amt_msat / 2;
+       nodes[0].router.expect_find_route(retry_params, Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -3311,11 +3297,7 @@ fn test_threaded_payment_retries() {
                                maybe_announced_channel: true,
                        }], blinded_tail: None }
                ],
-               route_params: Some(RouteParameters {
-                       payment_params: PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV),
-                       final_value_msat: amt_msat - amt_msat / 1000,
-                       max_total_routing_fee_msat: Some(500_000),
-               }),
+               route_params: Some(route_params.clone()),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
 
@@ -3334,6 +3316,7 @@ fn test_threaded_payment_retries() {
 
        // from here on out, the retry `RouteParameters` amount will be amt/1000
        route_params.final_value_msat /= 1000;
+       route.route_params.as_mut().unwrap().final_value_msat /= 1000;
        route.paths.pop();
 
        let end_time = Instant::now() + Duration::from_secs(1);
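Across these tests the expected retry parameters are always built the same way: clone the prior payment params, record the channel that just failed, and ask only for the amount still outstanding, with no fee cap. A hypothetical helper capturing that recipe (the types and constructors are the ones used throughout this file; the helper itself is illustrative):

    use crate::routing::router::{PaymentParameters, RouteParameters};

    fn expected_retry_params(
        mut payment_params: PaymentParameters, failed_scid: u64, remaining_msat: u64,
    ) -> RouteParameters {
        payment_params.previously_failed_channels.push(failed_scid);
        let mut params =
            RouteParameters::from_payment_params_and_value(payment_params, remaining_msat);
        params.max_total_routing_fee_msat = None;
        params
    }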
index fa08fba99fabda640739f714dd9eb6818d2ab374..151861411b997002de4792d5c031ed8079faee2e 100644 (file)
@@ -11,7 +11,7 @@
 
 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
@@ -25,6 +25,7 @@ use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
 use crate::util::string::UntrustedString;
 
+use bitcoin::{PackedLockTime, Transaction, TxOut};
 use bitcoin::hash_types::BlockHash;
 
 use crate::prelude::*;
@@ -1114,3 +1115,65 @@ fn removed_payment_no_manager_persistence() {
 
        expect_payment_failed!(nodes[0], payment_hash, false);
 }
+
+#[test]
+fn test_reload_partial_funding_batch() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let new_persister;
+       let new_chain_monitor;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let new_channel_manager;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       // The monitor is persisted when receiving funding_signed.
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // Reload the node while a subset of the channels in the funding batch have persisted monitors.
+       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let node_encoded = nodes[0].node.encode();
+       let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
+       reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);
+
+       // Process monitor events.
+       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+       // The monitor should now be closed, with its update_id set to CLOSED_CHANNEL_UPDATE_ID.
+       check_added_monitors(&nodes[0], 1);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
+       // The funding transaction should not have been broadcast, but we broadcast the force-close
+       // transaction as part of closing the monitor.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert!(broadcasted_txs[0].txid() != tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
+
+       // Ensure the channels don't exist anymore.
+       assert!(nodes[0].node.list_channels().is_empty());
+}
index c20ce2e97ee835ca0cf2c77b8c7526c08dba5855..33182f7cb0d87f94723356345e0a2f3199b5eeb1 100644 (file)
@@ -139,7 +139,7 @@ impl<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc
                        source, target, short_channel_id
                ) {
                        let usage = ChannelUsage {
-                               inflight_htlc_msat: usage.inflight_htlc_msat + used_liquidity,
+                               inflight_htlc_msat: usage.inflight_htlc_msat.saturating_add(used_liquidity),
                                ..usage
                        };
 
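The switch to `saturating_add` above guards the in-flight-HTLC accounting against integer overflow, which would panic in debug builds. A minimal standalone sketch of the semantics (the values here are made up):

    fn main() {
        // With plain `+`, an overflowing sum panics in debug builds; `saturating_add`
        // clamps at u64::MAX instead, which is the safe choice for msat accounting.
        let inflight_htlc_msat = u64::MAX - 10;
        let used_liquidity_msat = 25u64;
        assert_eq!(inflight_htlc_msat.saturating_add(used_liquidity_msat), u64::MAX);
    }
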
@@ -341,7 +341,7 @@ impl Path {
 
 /// A route directs a payment from the sender (us) to the recipient. If the recipient supports MPP,
 /// it can take multiple paths. Each path is composed of one or more hops through the network.
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub struct Route {
        /// The list of [`Path`]s taken for a single (potentially-)multi-part payment. If no
        /// [`BlindedTail`]s are present, then the pubkey of the last [`RouteHop`] in each path must be
@@ -380,6 +380,12 @@ impl Route {
        }
 }
 
+impl fmt::Display for Route {
+       fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+               log_route!(self).fmt(f)
+       }
+}
+
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
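The new `Display` impl above forwards to the existing `log_route!` wrapper rather than duplicating formatting logic. A self-contained sketch of that delegation pattern, using toy stand-ins for `Route` and the wrapper (none of these toy types are the real LDK types):

    use std::fmt;

    // Toy stand-ins: `LoggedRoute` plays the role of the `log_route!` wrapper.
    struct Route { hops: usize }
    struct LoggedRoute<'a>(&'a Route);

    impl<'a> fmt::Display for LoggedRoute<'a> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "route with {} hop(s)", self.0.hops)
        }
    }

    // Mirrors the shape of the diff: `Route`'s Display forwards to the wrapper.
    impl fmt::Display for Route {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            fmt::Display::fmt(&LoggedRoute(self), f)
        }
    }

    fn main() {
        println!("{}", Route { hops: 2 });
    }
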
@@ -475,14 +481,16 @@ pub struct RouteParameters {
        /// This limit also applies to the total fees that may arise while retrying failed payment
        /// paths.
        ///
-       /// Default value: `None`
+       /// Note that values below a few sats may result in some paths being spuriously ignored.
        pub max_total_routing_fee_msat: Option<u64>,
 }
 
 impl RouteParameters {
        /// Constructs [`RouteParameters`] from the given [`PaymentParameters`] and a payment amount.
+       ///
+       /// [`Self::max_total_routing_fee_msat`] defaults to 1% of the payment amount + 50 sats
        pub fn from_payment_params_and_value(payment_params: PaymentParameters, final_value_msat: u64) -> Self {
-               Self { payment_params, final_value_msat, max_total_routing_fee_msat: None }
+               Self { payment_params, final_value_msat, max_total_routing_fee_msat: Some(final_value_msat / 100 + 50_000) }
        }
 }
 
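With this change, `from_payment_params_and_value` no longer leaves the fee cap unset; several tests later in this diff explicitly restore the old unbounded behavior with `route_params.max_total_routing_fee_msat = None;`. A standalone sketch of the new default (the helper name is illustrative, not an LDK API):

    // Mirrors the new default: 1% of the payment amount plus 50 sats (50_000 msat).
    fn default_max_total_routing_fee_msat(final_value_msat: u64) -> u64 {
        final_value_msat / 100 + 50_000
    }

    fn main() {
        // A 1_000_000 msat (1_000 sat) payment gets a 60_000 msat fee cap by default.
        assert_eq!(default_max_total_routing_fee_msat(1_000_000), 60_000);
    }
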
@@ -1257,7 +1265,11 @@ impl<'a> PaymentPath<'a> {
        // Note that this function is not aware of the available_liquidity limit, and thus does not
        // support increasing the value being transferred beyond what was selected during the initial
        // routing passes.
-       fn update_value_and_recompute_fees(&mut self, value_msat: u64) {
+       //
+       // Returns the amount that this path contributes to the total payment value, which may be greater
+       // than `value_msat` if we had to overpay to meet the final node's `htlc_minimum_msat`.
+       fn update_value_and_recompute_fees(&mut self, value_msat: u64) -> u64 {
+               let mut extra_contribution_msat = 0;
                let mut total_fee_paid_msat = 0 as u64;
                for i in (0..self.hops.len()).rev() {
                        let last_hop = i == self.hops.len() - 1;
@@ -1272,6 +1284,7 @@ impl<'a> PaymentPath<'a> {
 
                        let cur_hop = &mut self.hops.get_mut(i).unwrap().0;
                        cur_hop.next_hops_fee_msat = total_fee_paid_msat;
+                       cur_hop.path_penalty_msat += extra_contribution_msat;
                        // Overpay in fees if we can't save these funds due to htlc_minimum_msat.
                        // We try to account for htlc_minimum_msat in scoring (add_entry!), so that nodes don't
                        // set it too high just to maliciously take more fees by exploiting this
@@ -1287,8 +1300,15 @@ impl<'a> PaymentPath<'a> {
                                // Also, this can't be exploited more heavily than *announce a free path and fail
                                // all payments*.
                                cur_hop_transferred_amount_msat += extra_fees_msat;
-                               total_fee_paid_msat += extra_fees_msat;
-                               cur_hop_fees_msat += extra_fees_msat;
+
+                               // We remember and return the extra fees on the final hop to allow accounting for
+                               // them in the path's value contribution.
+                               if last_hop {
+                                       extra_contribution_msat = extra_fees_msat;
+                               } else {
+                                       total_fee_paid_msat += extra_fees_msat;
+                                       cur_hop_fees_msat += extra_fees_msat;
+                               }
                        }
 
                        if last_hop {
@@ -1316,6 +1336,7 @@ impl<'a> PaymentPath<'a> {
                                }
                        }
                }
+               value_msat + extra_contribution_msat
        }
 }
 
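The key behavioral change here is that an overpayment forced by the final hop's `htlc_minimum_msat` is now counted as extra value delivered to the recipient rather than as routing fees, and the new return value surfaces that amount. A toy model of the final-hop case (not the actual `PaymentPath` logic):

    // Returns (value actually contributed, surplus forced by the final hop's minimum).
    fn final_hop_contribution(desired_msat: u64, final_htlc_minimum_msat: u64) -> (u64, u64) {
        let extra_contribution_msat = final_htlc_minimum_msat.saturating_sub(desired_msat);
        (desired_msat + extra_contribution_msat, extra_contribution_msat)
    }

    fn main() {
        // Delivering 5_000 msat over a final hop with a 15_000 msat minimum:
        let (value_msat, extra_msat) = final_hop_contribution(5_000, 15_000);
        assert_eq!(value_msat, 15_000);
        assert_eq!(extra_msat, 10_000); // counted as contribution, not as fee
    }
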
@@ -1481,6 +1502,9 @@ where L::Target: Logger {
        if payee_node_id_opt.map_or(false, |payee| payee == our_node_id) {
                return Err(LightningError{err: "Cannot generate a route to ourselves".to_owned(), action: ErrorAction::IgnoreError});
        }
+       if our_node_id == maybe_dummy_payee_node_id {
+               return Err(LightningError{err: "Invalid origin node id provided, use a different one".to_owned(), action: ErrorAction::IgnoreError});
+       }
 
        if final_value_msat > MAX_VALUE_MSAT {
                return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis".to_owned(), action: ErrorAction::IgnoreError});
@@ -1598,9 +1622,13 @@ where L::Target: Logger {
                        |info| info.features.supports_basic_mpp()))
        } else { false };
 
-       log_trace!(logger, "Searching for a route from payer {} to {} {} MPP and {} first hops {}overriding the network graph", our_node_pubkey,
-               LoggedPayeePubkey(payment_params.payee.node_id()), if allow_mpp { "with" } else { "without" },
-               first_hops.map(|hops| hops.len()).unwrap_or(0), if first_hops.is_some() { "" } else { "not " });
+       let max_total_routing_fee_msat = route_params.max_total_routing_fee_msat.unwrap_or(u64::max_value());
+
+       log_trace!(logger, "Searching for a route from payer {} to {} {} MPP and {} first hops {}overriding the network graph with a fee limit of {} msat",
+               our_node_pubkey, LoggedPayeePubkey(payment_params.payee.node_id()),
+               if allow_mpp { "with" } else { "without" },
+               first_hops.map(|hops| hops.len()).unwrap_or(0), if first_hops.is_some() { "" } else { "not " },
+               max_total_routing_fee_msat);
 
        // Step (1).
        // Prepare the data we'll use for payee-to-payer search by
@@ -1689,23 +1717,25 @@ where L::Target: Logger {
                LoggedPayeePubkey(payment_params.payee.node_id()), our_node_pubkey, final_value_msat);
 
        // Remember how many candidates we ignored to allow for some logging afterwards.
-       let mut num_ignored_value_contribution = 0;
-       let mut num_ignored_path_length_limit = 0;
-       let mut num_ignored_cltv_delta_limit = 0;
-       let mut num_ignored_previously_failed = 0;
-       let mut num_ignored_total_fee_limit = 0;
+       let mut num_ignored_value_contribution: u32 = 0;
+       let mut num_ignored_path_length_limit: u32 = 0;
+       let mut num_ignored_cltv_delta_limit: u32 = 0;
+       let mut num_ignored_previously_failed: u32 = 0;
+       let mut num_ignored_total_fee_limit: u32 = 0;
+       let mut num_ignored_avoid_overpayment: u32 = 0;
+       let mut num_ignored_htlc_minimum_msat_limit: u32 = 0;
 
        macro_rules! add_entry {
                // Adds entry which goes from $src_node_id to $dest_node_id over the $candidate hop.
                // $next_hops_fee_msat represents the fees paid for using all the channels *after* this one,
                // since that value has to be transferred over this channel.
-               // Returns whether this channel caused an update to `targets`.
+               // Returns the contribution amount of $candidate if the channel caused an update to `targets`.
                ( $candidate: expr, $src_node_id: expr, $dest_node_id: expr, $next_hops_fee_msat: expr,
                        $next_hops_value_contribution: expr, $next_hops_path_htlc_minimum_msat: expr,
                        $next_hops_path_penalty_msat: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { {
                        // We "return" whether we updated the path at the end, and how much we can route via
                        // this channel, via this:
-                       let mut did_add_update_path_to_src_node = None;
+                       let mut hop_contribution_amt_msat = None;
                        // Channels to self should not be used. This is more of belt-and-suspenders, because in
                        // practice these cases should be caught earlier:
                        // - for regular channels at channel announcement (TODO)
@@ -1762,9 +1792,9 @@ where L::Target: Logger {
                                        #[allow(unused_comparisons)] // $next_hops_path_htlc_minimum_msat is 0 in some calls so rustc complains
                                        let may_overpay_to_meet_path_minimum_msat =
                                                ((amount_to_transfer_over_msat < $candidate.htlc_minimum_msat() &&
-                                                 recommended_value_msat > $candidate.htlc_minimum_msat()) ||
+                                                 recommended_value_msat >= $candidate.htlc_minimum_msat()) ||
                                                 (amount_to_transfer_over_msat < $next_hops_path_htlc_minimum_msat &&
-                                                 recommended_value_msat > $next_hops_path_htlc_minimum_msat));
+                                                 recommended_value_msat >= $next_hops_path_htlc_minimum_msat));
 
                                        let payment_failed_on_this_channel = scid_opt.map_or(false,
                                                |scid| payment_params.previously_failed_channels.contains(&scid));
@@ -1773,6 +1803,7 @@ where L::Target: Logger {
                                                CandidateRouteHop::FirstHop { .. } => true,
                                                CandidateRouteHop::PrivateHop { .. } => true,
                                                CandidateRouteHop::Blinded { .. } => true,
+                                               CandidateRouteHop::OneHopBlinded { .. } => true,
                                                _ => false,
                                        };
 
@@ -1801,16 +1832,23 @@ where L::Target: Logger {
                                                }
                                                num_ignored_previously_failed += 1;
                                        } else if may_overpay_to_meet_path_minimum_msat {
+                                               if should_log_candidate {
+                                                       log_trace!(logger,
+                                                               "Ignoring {} to avoid overpaying to meet htlc_minimum_msat limit.",
+                                                               LoggedCandidateHop(&$candidate));
+                                               }
+                                               num_ignored_avoid_overpayment += 1;
                                                hit_minimum_limit = true;
                                        } else if over_path_minimum_msat {
                                                // Note that low contribution here (limited by available_liquidity_msat)
                                                // might violate htlc_minimum_msat on the hops which are next along the
                                                // payment path (upstream to the payee). To avoid that, we recompute
                                                // path fees knowing the final path contribution after constructing it.
-                                               let path_htlc_minimum_msat = cmp::max(
-                                                       compute_fees_saturating($next_hops_path_htlc_minimum_msat, $candidate.fees())
-                                                               .saturating_add($next_hops_path_htlc_minimum_msat),
-                                                       $candidate.htlc_minimum_msat());
+                                               let curr_min = cmp::max(
+                                                       $next_hops_path_htlc_minimum_msat, $candidate.htlc_minimum_msat()
+                                               );
+                                               let path_htlc_minimum_msat = compute_fees_saturating(curr_min, $candidate.fees())
+                                                       .saturating_add(curr_min);
                                                let hm_entry = dist.entry($src_node_id);
                                                let old_entry = hm_entry.or_insert_with(|| {
                                                        // If there was previously no known way to access the source node
@@ -1856,7 +1894,6 @@ where L::Target: Logger {
                                                        }
 
                                                        // Ignore hops if augmenting the current path to them would put us over `max_total_routing_fee_msat`
-                                                       let max_total_routing_fee_msat = route_params.max_total_routing_fee_msat.unwrap_or(u64::max_value());
                                                        if total_fee_msat > max_total_routing_fee_msat {
                                                                if should_log_candidate {
                                                                        log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate));
@@ -1918,7 +1955,7 @@ where L::Target: Logger {
                                                                        {
                                                                                old_entry.value_contribution_msat = value_contribution_msat;
                                                                        }
-                                                                       did_add_update_path_to_src_node = Some(value_contribution_msat);
+                                                                       hop_contribution_amt_msat = Some(value_contribution_msat);
                                                                } else if old_entry.was_processed && new_cost < old_cost {
                                                                        #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
                                                                        {
@@ -1950,10 +1987,17 @@ where L::Target: Logger {
                                                                }
                                                        }
                                                }
+                                       } else {
+                                               if should_log_candidate {
+                                                       log_trace!(logger,
+                                                               "Ignoring {} due to its htlc_minimum_msat limit.",
+                                                               LoggedCandidateHop(&$candidate));
+                                               }
+                                               num_ignored_htlc_minimum_msat_limit += 1;
                                        }
                                }
                        }
-                       did_add_update_path_to_src_node
+                       hop_contribution_amt_msat
                } }
        }
 
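The corrected `path_htlc_minimum_msat` computation above first takes the larger of the downstream path minimum and this hop's own minimum, and only then adds this hop's forwarding fee on that amount. A worked standalone example, assuming an illustrative base-plus-proportional fee schedule:

    // Shaped like `compute_fees_saturating`: base fee plus proportional millionths.
    fn fee_msat(amount_msat: u64, base_msat: u64, proportional_millionths: u64) -> u64 {
        base_msat.saturating_add(amount_msat.saturating_mul(proportional_millionths) / 1_000_000)
    }

    fn main() {
        let next_hops_path_htlc_minimum_msat = 10_000u64;
        let candidate_htlc_minimum_msat = 25_000u64;
        // Take the binding minimum first...
        let curr_min = next_hops_path_htlc_minimum_msat.max(candidate_htlc_minimum_msat);
        // ...then add the fee this hop charges to forward that minimum.
        let path_htlc_minimum_msat = curr_min.saturating_add(fee_msat(curr_min, 1_000, 10_000));
        assert_eq!(path_htlc_minimum_msat, 25_000 + 1_000 + 250); // 1 sat base + 1% of 25_000 msat
    }
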
@@ -2071,7 +2115,7 @@ where L::Target: Logger {
                                // in the regular network graph.
                                first_hop_targets.get(&intro_node_id).is_some() ||
                                network_nodes.get(&intro_node_id).is_some();
-                       if !have_intro_node_in_graph { continue }
+                       if !have_intro_node_in_graph || our_node_id == intro_node_id { continue }
                        let candidate = if hint.1.blinded_hops.len() == 1 {
                                CandidateRouteHop::OneHopBlinded { hint, hint_idx }
                        } else { CandidateRouteHop::Blinded { hint, hint_idx } };
@@ -2090,9 +2134,10 @@ where L::Target: Logger {
                                                Some(fee) => fee,
                                                None => continue
                                        };
+                                       let path_min = candidate.htlc_minimum_msat().saturating_add(
+                                               compute_fees_saturating(candidate.htlc_minimum_msat(), candidate.fees()));
                                        add_entry!(first_hop_candidate, our_node_id, intro_node_id, blinded_path_fee,
-                                               path_contribution_msat, candidate.htlc_minimum_msat(), 0_u64,
-                                               candidate.cltv_expiry_delta(),
+                                               path_contribution_msat, path_min, 0_u64, candidate.cltv_expiry_delta(),
                                                candidate.blinded_path().map_or(1, |bp| bp.blinded_hops.len() as u8));
                                }
                        }
@@ -2100,14 +2145,15 @@ where L::Target: Logger {
                for route in payment_params.payee.unblinded_route_hints().iter()
                        .filter(|route| !route.0.is_empty())
                {
-                       let first_hop_in_route = &(route.0)[0];
-                       let have_hop_src_in_graph =
-                               // Only add the hops in this route to our candidate set if either
-                               // we have a direct channel to the first hop or the first hop is
-                               // in the regular network graph.
-                               first_hop_targets.get(&NodeId::from_pubkey(&first_hop_in_route.src_node_id)).is_some() ||
-                               network_nodes.get(&NodeId::from_pubkey(&first_hop_in_route.src_node_id)).is_some();
-                       if have_hop_src_in_graph {
+                       let first_hop_src_id = NodeId::from_pubkey(&route.0.first().unwrap().src_node_id);
+                       let first_hop_src_is_reachable =
+                               // Only add the hops in this route to our candidate set if either we are part of
+                               // the first hop, we have a direct channel to the first hop, or the first hop is in
+                               // the regular network graph.
+                               our_node_id == first_hop_src_id ||
+                               first_hop_targets.get(&first_hop_src_id).is_some() ||
+                               network_nodes.get(&first_hop_src_id).is_some();
+                       if first_hop_src_is_reachable {
                                // We start building the path from reverse, i.e., from payee
                                // to the first RouteHintHop in the path.
                                let hop_iter = route.0.iter().rev();
@@ -2124,6 +2170,15 @@ where L::Target: Logger {
                                for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
                                        let source = NodeId::from_pubkey(&hop.src_node_id);
                                        let target = NodeId::from_pubkey(&prev_hop_id);
+
+                                       if let Some(first_channels) = first_hop_targets.get(&target) {
+                                               if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
+                                                       log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.",
+                                                               hop.short_channel_id);
+                                                       break;
+                                               }
+                                       }
+
                                        let candidate = network_channels
                                                .get(&hop.short_channel_id)
                                                .and_then(|channel| channel.as_directed_to(&target))
@@ -2167,12 +2222,12 @@ where L::Target: Logger {
                                                .saturating_add(1);
 
                                        // Searching for a direct channel between last checked hop and first_hop_targets
-                                       if let Some(first_channels) = first_hop_targets.get_mut(&NodeId::from_pubkey(&prev_hop_id)) {
+                                       if let Some(first_channels) = first_hop_targets.get_mut(&target) {
                                                sort_first_hop_channels(first_channels, &used_liquidities,
                                                        recommended_value_msat, our_node_pubkey);
                                                for details in first_channels {
                                                        let first_hop_candidate = CandidateRouteHop::FirstHop { details };
-                                                       add_entry!(first_hop_candidate, our_node_id, NodeId::from_pubkey(&prev_hop_id),
+                                                       add_entry!(first_hop_candidate, our_node_id, target,
                                                                aggregate_next_hops_fee_msat, aggregate_path_contribution_msat,
                                                                aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat,
                                                                aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length);
@@ -2192,11 +2247,15 @@ where L::Target: Logger {
                                                .map_or(None, |inc| inc.checked_add(aggregate_next_hops_fee_msat));
                                        aggregate_next_hops_fee_msat = if let Some(val) = hops_fee { val } else { break; };
 
-                                       let hop_htlc_minimum_msat = candidate.htlc_minimum_msat();
-                                       let hop_htlc_minimum_msat_inc = if let Some(val) = compute_fees(aggregate_next_hops_path_htlc_minimum_msat, hop.fees) { val } else { break; };
-                                       let hops_path_htlc_minimum = aggregate_next_hops_path_htlc_minimum_msat
-                                               .checked_add(hop_htlc_minimum_msat_inc);
-                                       aggregate_next_hops_path_htlc_minimum_msat = if let Some(val) = hops_path_htlc_minimum { cmp::max(hop_htlc_minimum_msat, val) } else { break; };
+                                       // The next channel will need to relay this channel's min_htlc *plus* the fees taken by
+                                       // this route hint's source node to forward said min over this channel.
+                                       aggregate_next_hops_path_htlc_minimum_msat = {
+                                               let curr_htlc_min = cmp::max(
+                                                       candidate.htlc_minimum_msat(), aggregate_next_hops_path_htlc_minimum_msat
+                                               );
+                                               let curr_htlc_min_fee = if let Some(val) = compute_fees(curr_htlc_min, hop.fees) { val } else { break };
+                                               if let Some(min) = curr_htlc_min.checked_add(curr_htlc_min_fee) { min } else { break }
+                                       };
 
                                        if idx == route.0.len() - 1 {
                                                // The last hop in this iterator is the first hop in
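
The same principle drives the rewritten aggregate above: walking the route hint payee-to-payer, each channel must carry the downstream minimum plus the fee its source node charges to forward it. A toy reverse walk with a base-fee-only schedule (values are illustrative):

    fn main() {
        // (htlc_minimum_msat, source node's base fee in msat), payee-to-payer order.
        let hops = [(20_000u64, 1_000u64), (5_000, 2_000)];
        let mut aggregate_min_msat = 0u64;
        for (htlc_min, base_fee) in hops {
            let curr_min = aggregate_min_msat.max(htlc_min);
            aggregate_min_msat = curr_min + base_fee;
        }
        // Hop nearest the payee: max(0, 20_000) + 1_000 = 21_000.
        // Next hop toward the payer: max(21_000, 5_000) + 2_000 = 23_000.
        assert_eq!(aggregate_min_msat, 23_000);
    }
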
@@ -2312,8 +2371,8 @@ where L::Target: Logger {
                                // recompute the fees again, so that if that's the case, we match the currently
                                // underpaid htlc_minimum_msat with fees.
                                debug_assert_eq!(payment_path.get_value_msat(), value_contribution_msat);
-                               value_contribution_msat = cmp::min(value_contribution_msat, final_value_msat);
-                               payment_path.update_value_and_recompute_fees(value_contribution_msat);
+                               let desired_value_contribution = cmp::min(value_contribution_msat, final_value_msat);
+                               value_contribution_msat = payment_path.update_value_and_recompute_fees(desired_value_contribution);
 
                                // Since a path allows to transfer as much value as
                                // the smallest channel it has ("bottleneck"), we should recompute
@@ -2398,6 +2457,9 @@ where L::Target: Logger {
                // because we deterministically terminated the search due to low liquidity.
                if !found_new_path && channel_saturation_pow_half != 0 {
                        channel_saturation_pow_half = 0;
+               } else if !found_new_path && hit_minimum_limit && already_collected_value_msat < final_value_msat && path_value_msat != recommended_value_msat {
+                       log_trace!(logger, "Failed to collect enough value, but running again to collect extra paths with a potentially higher limit.");
+                       path_value_msat = recommended_value_msat;
                } else if already_collected_value_msat >= recommended_value_msat || !found_new_path {
                        log_trace!(logger, "Have now collected {} msat (seeking {} msat) in paths. Last path loop {} a new path.",
                                already_collected_value_msat, recommended_value_msat, if found_new_path { "found" } else { "did not find" });
@@ -2412,15 +2474,22 @@ where L::Target: Logger {
                                log_trace!(logger, "Collected exactly our payment amount on the first pass, without hitting an htlc_minimum_msat limit, exiting.");
                                break 'paths_collection;
                        }
-                       log_trace!(logger, "Collected our payment amount on the first pass, but running again to collect extra paths with a potentially higher limit.");
+                       log_trace!(logger, "Collected our payment amount on the first pass, but running again to collect extra paths with a potentially higher value to meet htlc_minimum_msat limit.");
                        path_value_msat = recommended_value_msat;
                }
        }
 
        let num_ignored_total = num_ignored_value_contribution + num_ignored_path_length_limit +
-               num_ignored_cltv_delta_limit + num_ignored_previously_failed + num_ignored_total_fee_limit;
+               num_ignored_cltv_delta_limit + num_ignored_previously_failed +
+               num_ignored_avoid_overpayment + num_ignored_htlc_minimum_msat_limit +
+               num_ignored_total_fee_limit;
        if num_ignored_total > 0 {
-               log_trace!(logger, "Ignored {} candidate hops due to insufficient value contribution, {} due to path length limit, {} due to CLTV delta limit, {} due to previous payment failure, {} due to maximum total fee limit. Total: {} ignored candidates.", num_ignored_value_contribution, num_ignored_path_length_limit, num_ignored_cltv_delta_limit, num_ignored_previously_failed, num_ignored_total_fee_limit, num_ignored_total);
+               log_trace!(logger,
+                       "Ignored {} candidate hops due to insufficient value contribution, {} due to path length limit, {} due to CLTV delta limit, {} due to previous payment failure, {} due to htlc_minimum_msat limit, {} to avoid overpaying, {} due to maximum total fee limit. Total: {} ignored candidates.",
+                       num_ignored_value_contribution, num_ignored_path_length_limit,
+                       num_ignored_cltv_delta_limit, num_ignored_previously_failed,
+                       num_ignored_htlc_minimum_msat_limit, num_ignored_avoid_overpayment,
+                       num_ignored_total_fee_limit, num_ignored_total);
        }
 
        // Step (5).
@@ -2562,14 +2631,6 @@ where L::Target: Logger {
        // Make sure we would never create a route with more paths than we allow.
        debug_assert!(paths.len() <= payment_params.max_path_count.into());
 
-       // Make sure we would never create a route whose total fees exceed max_total_routing_fee_msat.
-       if let Some(max_total_routing_fee_msat) = route_params.max_total_routing_fee_msat {
-               if paths.iter().map(|p| p.fee_msat()).sum::<u64>() > max_total_routing_fee_msat {
-                       return Err(LightningError{err: format!("Failed to find route that adheres to the maximum total fee limit of {}msat",
-                               max_total_routing_fee_msat), action: ErrorAction::IgnoreError});
-               }
-       }
-
        if let Some(node_features) = payment_params.payee.node_features() {
                for path in paths.iter_mut() {
                        path.hops.last_mut().unwrap().node_features = node_features.clone();
@@ -2577,6 +2638,15 @@ where L::Target: Logger {
        }
 
        let route = Route { paths, route_params: Some(route_params.clone()) };
+
+       // Make sure we would never create a route whose total fees exceed max_total_routing_fee_msat.
+       if let Some(max_total_routing_fee_msat) = route_params.max_total_routing_fee_msat {
+               if route.get_total_fees() > max_total_routing_fee_msat {
+                       return Err(LightningError{err: format!("Failed to find route that adheres to the maximum total fee limit of {}msat",
+                               max_total_routing_fee_msat), action: ErrorAction::IgnoreError});
+               }
+       }
+
        log_info!(logger, "Got route: {}", log_route!(route));
        Ok(route)
 }
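
Moving the cap check after `Route` construction means it now runs against the route's actual total fees via `Route::get_total_fees`, including any fees added while meeting `htlc_minimum_msat` constraints along the way. A sketch of the check in isolation (the helper name is illustrative, not the LDK API):

    fn validate_total_fees(path_fees_msat: &[u64], max_total_routing_fee_msat: Option<u64>) -> Result<(), String> {
        let total_fees_msat: u64 = path_fees_msat.iter().sum();
        if let Some(max_fee_msat) = max_total_routing_fee_msat {
            if total_fees_msat > max_fee_msat {
                return Err(format!(
                    "Failed to find route that adheres to the maximum total fee limit of {}msat",
                    max_fee_msat));
            }
        }
        Ok(())
    }

    fn main() {
        assert!(validate_total_fees(&[4_000, 5_000], Some(10_000)).is_ok());
        assert!(validate_total_fees(&[4_000, 7_000], Some(10_000)).is_err());
        assert!(validate_total_fees(&[4_000, 7_000], None).is_ok()); // no cap set
    }
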
@@ -2876,7 +2946,7 @@ mod tests {
                        Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
                } else { panic!(); }
+
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
                        Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
@@ -3092,8 +3162,9 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route_params = RouteParameters::from_payment_params_and_value(
+               let mut route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 60_000);
+               route_params.max_total_routing_fee_msat = Some(15_000);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
                        Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                // Overpay fees to hit htlc_minimum_msat.
@@ -3159,6 +3230,67 @@ mod tests {
                assert_eq!(fees, 5_000);
        }
 
+       #[test]
+       fn htlc_minimum_recipient_overpay_test() {
+               let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
+               let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+               let config = UserConfig::default();
+               let payment_params = PaymentParameters::from_node_id(nodes[2], 42).with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+               // Route to node2 over a single path which requires overpaying the recipient themselves.
+
+               // First disable all paths except the us -> node1 -> node2 path
+               update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+                       chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+                       short_channel_id: 13,
+                       timestamp: 2,
+                       flags: 3,
+                       cltv_expiry_delta: 0,
+                       htlc_minimum_msat: 0,
+                       htlc_maximum_msat: 0,
+                       fee_base_msat: 0,
+                       fee_proportional_millionths: 0,
+                       excess_data: Vec::new()
+               });
+
+               // Set channel 4 to free but with a high htlc_minimum_msat
+               update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+                       chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+                       short_channel_id: 4,
+                       timestamp: 2,
+                       flags: 0,
+                       cltv_expiry_delta: 0,
+                       htlc_minimum_msat: 15_000,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
+                       fee_base_msat: 0,
+                       fee_proportional_millionths: 0,
+                       excess_data: Vec::new()
+               });
+
+               // Now check that we'll fail to find a path if the htlc_minimum is overrun.
+               // Note that the fees are actually calculated on 3*payment amount as that's
+               // what we try to find a route for, so this test only just happens to work
+               // out to exactly the fee limit.
+               let mut route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 5_000);
+               route_params.max_total_routing_fee_msat = Some(9_999);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer,
+                       &Default::default(), &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find route that adheres to the maximum total fee limit of 9999msat");
+               } else { panic!(); }
+
+               let mut route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 5_000);
+               route_params.max_total_routing_fee_msat = Some(10_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
+               assert_eq!(route.get_total_fees(), 10_000);
+       }
+
        #[test]
        fn disable_channels_test() {
                let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
@@ -5751,8 +5883,9 @@ mod tests {
                {
                        // Now, attempt to route 90 sats, which is exactly 90 sats at the last hop, plus the
                        // 200% fee charged channel 13 in the 1-to-2 direction.
-                       let route_params = RouteParameters::from_payment_params_and_value(
+                       let mut route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 90_000);
+                       route_params.max_total_routing_fee_msat = Some(90_000*2);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
@@ -5820,8 +5953,9 @@ mod tests {
                        // Now, attempt to route 90 sats, hitting the htlc_minimum on channel 4, but
                        // overshooting the htlc_maximum on channel 2. Thus, we should pick the (absurdly
                        // expensive) channels 12-13 path.
-                       let route_params = RouteParameters::from_payment_params_and_value(
+                       let mut route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 90_000);
+                       route_params.max_total_routing_fee_msat = Some(90_000*2);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
@@ -6526,8 +6660,9 @@ mod tests {
 
                // Make sure we'll error if our route hints don't have enough liquidity according to their
                // htlc_maximum_msat.
-               let route_params = RouteParameters::from_payment_params_and_value(
+               let mut route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, max_htlc_msat + 1);
+               route_params.max_total_routing_fee_msat = None;
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
                        &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &Default::default(),
                        &random_seed_bytes)
@@ -6541,8 +6676,9 @@ mod tests {
                let payment_params = PaymentParameters::from_node_id(dest_node_id, 42)
                        .with_route_hints(vec![route_hint_1, route_hint_2]).unwrap()
                        .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
-               let route_params = RouteParameters::from_payment_params_and_value(
+               let mut route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, max_htlc_msat + 1);
+               route_params.max_total_routing_fee_msat = Some(max_htlc_msat * 2);
                let route = get_route(&our_id, &route_params, &netgraph, None, Arc::clone(&logger),
                        &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
@@ -6977,7 +7113,8 @@ mod tests {
                let payment_params = PaymentParameters::blinded(blinded_hints.clone())
                        .with_bolt12_features(bolt12_features.clone()).unwrap();
 
-               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100_000);
+               let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, 100_000);
+               route_params.max_total_routing_fee_msat = Some(100_000);
                let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
                        &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
@@ -7152,6 +7289,412 @@ mod tests {
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_msat);
        }
+
+       #[test]
+       fn we_are_intro_node_candidate_hops() {
+               // This previously led to a panic in the router because we'd generate a Path with only a
+               // BlindedTail and 0 unblinded hops, due to the only candidate hops being blinded route hints
+               // where the origin node is the intro node. We now fully disallow considering candidate hops
+               // where the origin node is the intro node.
+               let (secp_ctx, network_graph, _, _, logger) = build_graph();
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 21_7020_5185_1423_0019;
+
+               let blinded_path = BlindedPath {
+                       introduction_node_id: our_id,
+                       blinding_point: ln_test_utils::pubkey(42),
+                       blinded_hops: vec![
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                       ],
+               };
+               let blinded_payinfo = BlindedPayInfo {
+                       fee_base_msat: 5052_9027,
+                       fee_proportional_millionths: 0,
+                       htlc_minimum_msat: 21_7020_5185_1423_0019,
+                       htlc_maximum_msat: 1844_6744_0737_0955_1615,
+                       cltv_expiry_delta: 0,
+                       features: BlindedHopFeatures::empty(),
+               };
+               let mut blinded_hints = vec![
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+               ];
+               blinded_hints[1].1.introduction_node_id = nodes[6];
+
+               let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+               let payment_params = PaymentParameters::blinded(blinded_hints.clone())
+                       .with_bolt12_features(bolt12_features.clone()).unwrap();
+
+               let netgraph = network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               if let Err(LightningError { err, .. }) = get_route(
+                       &our_id, &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &(), &random_seed_bytes
+               ) {
+                       assert_eq!(err, "Failed to find a path to the given destination");
+               } else { panic!() }
+       }
+
+       #[test]
+       fn we_are_intro_node_bp_in_final_path_fee_calc() {
+               // This previously led to a debug panic in the router because we'd find an invalid Path with
+               // 0 unblinded hops and a blinded tail, leading to the generation of a final
+               // PaymentPathHop::fee_msat that included both the blinded path fees and the final value of
+               // the payment, when it was intended to only include the final value of the payment.
+               let (secp_ctx, network_graph, _, _, logger) = build_graph();
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 21_7020_5185_1423_0019;
+
+               let blinded_path = BlindedPath {
+                       introduction_node_id: our_id,
+                       blinding_point: ln_test_utils::pubkey(42),
+                       blinded_hops: vec![
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                       ],
+               };
+               let blinded_payinfo = BlindedPayInfo {
+                       fee_base_msat: 10_4425_1395,
+                       fee_proportional_millionths: 0,
+                       htlc_minimum_msat: 21_7301_9934_9094_0931,
+                       htlc_maximum_msat: 1844_6744_0737_0955_1615,
+                       cltv_expiry_delta: 0,
+                       features: BlindedHopFeatures::empty(),
+               };
+               let mut blinded_hints = vec![
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+               ];
+               blinded_hints[1].0.fee_base_msat = 5052_9027;
+               blinded_hints[1].0.htlc_minimum_msat = 21_7020_5185_1423_0019;
+               blinded_hints[1].0.htlc_maximum_msat = 1844_6744_0737_0955_1615;
+
+               blinded_hints[2].1.introduction_node_id = nodes[6];
+
+               let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+               let payment_params = PaymentParameters::blinded(blinded_hints.clone())
+                       .with_bolt12_features(bolt12_features.clone()).unwrap();
+
+               let netgraph = network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               if let Err(LightningError { err, .. }) = get_route(
+                       &our_id, &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &(), &random_seed_bytes
+               ) {
+                       assert_eq!(err, "Failed to find a path to the given destination");
+               } else { panic!() }
+       }
+
+       #[test]
+       fn min_htlc_overpay_violates_max_htlc() {
+               do_min_htlc_overpay_violates_max_htlc(true);
+               do_min_htlc_overpay_violates_max_htlc(false);
+       }
+       fn do_min_htlc_overpay_violates_max_htlc(blinded_payee: bool) {
+               // Test that if overpaying to meet a later hop's min_htlc causes us to violate an earlier
+               // hop's max_htlc, we don't consider that candidate hop valid. Previously we would add this hop
+               // to `targets` and build an invalid path with it, and subsequently hit a debug panic asserting
+               // that the used liquidity for a hop was less than its available liquidity limit.
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(ln_test_utils::TestLogger::new());
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 7_4009_8048;
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let first_hop_outbound_capacity = 2_7345_2000;
+               let first_hops = vec![get_channel_details(
+                       Some(200), nodes[0], channelmanager::provided_init_features(&config),
+                       first_hop_outbound_capacity
+               )];
+
+               let base_fee = 1_6778_3453;
+               let htlc_min = 2_5165_8240;
+               let payment_params = if blinded_payee {
+                       let blinded_path = BlindedPath {
+                               introduction_node_id: nodes[0],
+                               blinding_point: ln_test_utils::pubkey(42),
+                               blinded_hops: vec![
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                               ],
+                       };
+                       let blinded_payinfo = BlindedPayInfo {
+                               fee_base_msat: base_fee,
+                               fee_proportional_millionths: 0,
+                               htlc_minimum_msat: htlc_min,
+                               htlc_maximum_msat: htlc_min * 1000,
+                               cltv_expiry_delta: 0,
+                               features: BlindedHopFeatures::empty(),
+                       };
+                       let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+                       PaymentParameters::blinded(vec![(blinded_payinfo, blinded_path)])
+                               .with_bolt12_features(bolt12_features.clone()).unwrap()
+               } else {
+                       let route_hint = RouteHint(vec![RouteHintHop {
+                               src_node_id: nodes[0],
+                               short_channel_id: 42,
+                               fees: RoutingFees {
+                                       base_msat: base_fee,
+                                       proportional_millionths: 0,
+                               },
+                               cltv_expiry_delta: 10,
+                               htlc_minimum_msat: Some(htlc_min),
+                               htlc_maximum_msat: None,
+                       }]);
+
+                       PaymentParameters::from_node_id(nodes[1], 42)
+                               .with_route_hints(vec![route_hint]).unwrap()
+                               .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap()
+               };
+
+               let netgraph = network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               if let Err(LightningError { err, .. }) = get_route(
+                       &our_id, &route_params, &netgraph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
+               ) {
+                       assert_eq!(err, "Failed to find a path to the given destination");
+               } else { panic!() }
+       }
+
+       #[test]
+       fn previously_used_liquidity_violates_max_htlc() {
+               do_previously_used_liquidity_violates_max_htlc(true);
+               do_previously_used_liquidity_violates_max_htlc(false);
+       }
+       fn do_previously_used_liquidity_violates_max_htlc(blinded_payee: bool) {
+               // Test that if a candidate first_hop<>route_hint_src_node channel does not have enough
+               // contribution amount to cover the next hop's min_htlc plus fees, we will not consider that
+               // candidate. In this case, the candidate does not have enough due to a previous path taking up
+               // some of its liquidity. Previously we would construct an invalid path and hit a debug panic
+               // asserting that the used liquidity for a hop was less than its available liquidity limit.
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(ln_test_utils::TestLogger::new());
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 52_4288;
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let first_hops = vec![get_channel_details(
+                       Some(161), nodes[0], channelmanager::provided_init_features(&config), 486_4000
+               ), get_channel_details(
+                       Some(122), nodes[0], channelmanager::provided_init_features(&config), 179_5000
+               )];
+
+               let base_fees = [0, 425_9840, 0, 0];
+               let htlc_mins = [1_4392, 19_7401, 1027, 6_5535];
+               let payment_params = if blinded_payee {
+                       let blinded_path = BlindedPath {
+                               introduction_node_id: nodes[0],
+                               blinding_point: ln_test_utils::pubkey(42),
+                               blinded_hops: vec![
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                               ],
+                       };
+                       let mut blinded_hints = Vec::new();
+                       for (base_fee, htlc_min) in base_fees.iter().zip(htlc_mins.iter()) {
+                               blinded_hints.push((BlindedPayInfo {
+                                       fee_base_msat: *base_fee,
+                                       fee_proportional_millionths: 0,
+                                       htlc_minimum_msat: *htlc_min,
+                                       htlc_maximum_msat: htlc_min * 100,
+                                       cltv_expiry_delta: 10,
+                                       features: BlindedHopFeatures::empty(),
+                               }, blinded_path.clone()));
+                       }
+                       let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+                       PaymentParameters::blinded(blinded_hints.clone())
+                               .with_bolt12_features(bolt12_features.clone()).unwrap()
+               } else {
+                       let mut route_hints = Vec::new();
+                       for (idx, (base_fee, htlc_min)) in base_fees.iter().zip(htlc_mins.iter()).enumerate() {
+                               route_hints.push(RouteHint(vec![RouteHintHop {
+                                       src_node_id: nodes[0],
+                                       short_channel_id: 42 + idx as u64,
+                                       fees: RoutingFees {
+                                               base_msat: *base_fee,
+                                               proportional_millionths: 0,
+                                       },
+                                       cltv_expiry_delta: 10,
+                                       htlc_minimum_msat: Some(*htlc_min),
+                                       htlc_maximum_msat: Some(htlc_min * 100),
+                               }]));
+                       }
+                       PaymentParameters::from_node_id(nodes[1], 42)
+                               .with_route_hints(route_hints).unwrap()
+                               .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap()
+               };
+
+               let netgraph = network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+
+               let route = get_route(
+                       &our_id, &route_params, &netgraph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
+               ).unwrap();
+               assert_eq!(route.paths.len(), 1);
+               assert_eq!(route.get_total_amount(), amt_msat);
+       }
+
+       #[test]
+       fn candidate_path_min() {
+               // Test that if a candidate first_hop<>network_node channel does not have enough contribution
+               // amount to cover the next channel's min htlc plus fees, we will not consider that candidate.
+               // Previously, we were storing RouteGraphNodes with a path_min that did not include fees, and
+               // would add a connecting first_hop node that did not have enough contribution amount, leading
+               // to a debug panic upon invalid path construction.
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(ln_test_utils::TestLogger::new());
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
+               let gossip_sync = P2PGossipSync::new(network_graph.clone(), None, logger.clone());
+               let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), network_graph.clone(), logger.clone());
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 7_4009_8048;
+               let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+               let first_hops = vec![get_channel_details(
+                       Some(200), nodes[0], channelmanager::provided_init_features(&config), 2_7345_2000
+               )];
+
+               add_channel(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[6], ChannelFeatures::from_le_bytes(id_to_feature_flags(6)), 6);
+               update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+                       chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+                       short_channel_id: 6,
+                       timestamp: 1,
+                       flags: 0,
+                       cltv_expiry_delta: (6 << 4) | 0,
+                       htlc_minimum_msat: 0,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
+                       fee_base_msat: 0,
+                       fee_proportional_millionths: 0,
+                       excess_data: Vec::new()
+               });
+               add_or_update_node(&gossip_sync, &secp_ctx, &privkeys[0], NodeFeatures::from_le_bytes(id_to_feature_flags(1)), 0);
+
+               let htlc_min = 2_5165_8240;
+               let blinded_hints = vec![
+                       (BlindedPayInfo {
+                               fee_base_msat: 1_6778_3453,
+                               fee_proportional_millionths: 0,
+                               htlc_minimum_msat: htlc_min,
+                               htlc_maximum_msat: htlc_min * 100,
+                               cltv_expiry_delta: 10,
+                               features: BlindedHopFeatures::empty(),
+                       }, BlindedPath {
+                               introduction_node_id: nodes[0],
+                               blinding_point: ln_test_utils::pubkey(42),
+                               blinded_hops: vec![
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                               ],
+                       })
+               ];
+               let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+               let payment_params = PaymentParameters::blinded(blinded_hints.clone())
+                       .with_bolt12_features(bolt12_features.clone()).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let netgraph = network_graph.read_only();
+
+               if let Err(LightningError { err, .. }) = get_route(
+                       &our_id, &route_params, &netgraph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(),
+                       &random_seed_bytes
+               ) {
+                       assert_eq!(err, "Failed to find a path to the given destination");
+               } else { panic!() }
+       }
+
+       #[test]
+       fn path_contribution_includes_min_htlc_overpay() {
+               // Previously, the fuzzer hit a debug panic because we wouldn't include the amount overpaid to
+               // meet a last hop's min_htlc in the total collected paths value. We now include this value and
+               // also penalize hops along the overpaying path to ensure that it gets deprioritized in path
+               // selection, both tested here.
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(ln_test_utils::TestLogger::new());
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
+               let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), network_graph.clone(), logger.clone());
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 562_0000;
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let first_hops = vec![
+                       get_channel_details(
+                               Some(83), nodes[0], channelmanager::provided_init_features(&config), 2199_0000,
+                       ),
+               ];
+
+               let htlc_mins = [49_0000, 1125_0000];
+               let payment_params = {
+                       let blinded_path = BlindedPath {
+                               introduction_node_id: nodes[0],
+                               blinding_point: ln_test_utils::pubkey(42),
+                               blinded_hops: vec![
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                                       BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                               ],
+                       };
+                       let mut blinded_hints = Vec::new();
+                       for htlc_min in htlc_mins.iter() {
+                               blinded_hints.push((BlindedPayInfo {
+                                       fee_base_msat: 0,
+                                       fee_proportional_millionths: 0,
+                                       htlc_minimum_msat: *htlc_min,
+                                       htlc_maximum_msat: *htlc_min * 100,
+                                       cltv_expiry_delta: 10,
+                                       features: BlindedHopFeatures::empty(),
+                               }, blinded_path.clone()));
+                       }
+                       let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+                       PaymentParameters::blinded(blinded_hints.clone())
+                               .with_bolt12_features(bolt12_features.clone()).unwrap()
+               };
+
+               let netgraph = network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(
+                       &our_id, &route_params, &netgraph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(),
+                       &random_seed_bytes
+               ).unwrap();
+               assert_eq!(route.paths.len(), 1);
+               assert_eq!(route.get_total_amount(), amt_msat);
+       }
 }
 
 #[cfg(all(any(test, ldk_bench), not(feature = "no-std")))]
index f25c947644838f1dfaec843c74c26c8b4f935f72..5b42796a94f9586fe2dd42fd7210fada4be7297f 100644 (file)
@@ -68,7 +68,7 @@ pub struct KeyMaterial(pub [u8; 32]);
 /// Information about a spendable output to a P2WSH script.
 ///
 /// See [`SpendableOutputDescriptor::DelayedPaymentOutput`] for more details on how to spend this.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub struct DelayedPaymentOutputDescriptor {
        /// The outpoint which is spendable.
        pub outpoint: OutPoint,
@@ -110,7 +110,7 @@ impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, {
 /// Information about a spendable output to our "payment key".
 ///
 /// See [`SpendableOutputDescriptor::StaticPaymentOutput`] for more details on how to spend this.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub struct StaticPaymentOutputDescriptor {
        /// The outpoint which is spendable.
        pub outpoint: OutPoint,
@@ -146,7 +146,7 @@ impl_writeable_tlv_based!(StaticPaymentOutputDescriptor, {
 /// at that `txid`/`index`, and any keys or other information required to sign.
 ///
 /// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub enum SpendableOutputDescriptor {
        /// An output to a script which was provided via [`SignerProvider`] directly, either from
        /// [`get_destination_script`] or [`get_shutdown_scriptpubkey`], thus you should already
index 431c62c9fb83de88830319e6b87b2f7370ac7f90..2a022c37cc4a476e3bd3c2ecac076b53f25e9d87 100644 (file)
@@ -8,25 +8,28 @@
 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
 //! and [`ChannelMonitor`] all in one place.
 
+use core::cmp;
+use core::convert::{TryFrom, TryInto};
 use core::ops::Deref;
 use bitcoin::hashes::hex::{FromHex, ToHex};
 use bitcoin::{BlockHash, Txid};
 
-use crate::io;
-use crate::prelude::{Vec, String};
-use crate::routing::scoring::WriteableScore;
+use crate::{io, log_error};
+use crate::alloc::string::ToString;
+use crate::prelude::*;
 
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
 use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::ChannelManager;
 use crate::routing::router::Router;
 use crate::routing::gossip::NetworkGraph;
+use crate::routing::scoring::WriteableScore;
 use crate::util::logger::Logger;
-use crate::util::ser::{ReadableArgs, Writeable};
+use crate::util::ser::{Readable, ReadableArgs, Writeable};
 
 /// The alphabet of characters allowed for namespaces and keys.
 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
@@ -34,64 +37,75 @@ pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCD
 /// The maximum number of characters namespaces and keys may have.
 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
 
-/// The namespace under which the [`ChannelManager`] will be persisted.
-pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
-/// The sub-namespace under which the [`ChannelManager`] will be persisted.
-pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`ChannelManager`] will be persisted.
 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
 
-/// The namespace under which [`ChannelMonitor`]s will be persisted.
-pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
-/// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
-pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
+/// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+/// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
+pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
 
-/// The namespace under which the [`NetworkGraph`] will be persisted.
-pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
-/// The sub-namespace under which the [`NetworkGraph`] will be persisted.
-pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`NetworkGraph`] will be persisted.
 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
 
-/// The namespace under which the [`WriteableScore`] will be persisted.
-pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
-/// The sub-namespace under which the [`WriteableScore`] will be persisted.
-pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`WriteableScore`] will be persisted.
 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
 
+/// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
+///
+/// This serves to prevent someone from accidentally loading such monitors (which may need
+/// updates applied to be current) with another implementation.
+pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
+
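As the `MonitorUpdatingPersister` documentation below explains, downgrading to a persister that does not understand these monitors requires applying all pending updates and then removing the sentinel bytes. A minimal sketch of that stripping step (the helper function is hypothetical; only the constant comes from this module):

```rust
use lightning::util::persist::MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL;

// Hypothetical helper: return the monitor serialization without the sentinel
// prefix, leaving inputs that lack the prefix untouched.
fn strip_sentinel(serialized_monitor: &[u8]) -> &[u8] {
	if serialized_monitor.starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
		&serialized_monitor[MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len()..]
	} else {
		serialized_monitor
	}
}
```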
 /// Provides an interface that allows storage and retrieval of persisted values that are associated
 /// with given keys.
 ///
-/// In order to avoid collisions the key space is segmented based on the given `namespace`s and
-/// `sub_namespace`s. Implementations of this trait are free to handle them in different ways, as
-/// long as per-namespace key uniqueness is asserted.
+/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
+/// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
+/// ways, as long as per-namespace key uniqueness is asserted.
 ///
 /// Keys and namespaces are required to be valid ASCII strings in the range of
 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
-/// namespaces and sub-namespaces (`""`) are assumed to be a valid, however, if `namespace` is
-/// empty, `sub_namespace` is required to be empty, too. This means that concerns should always be
-/// separated by namespace first, before sub-namespaces are used. While the number of namespaces
-/// will be relatively small and is determined at compile time, there may be many sub-namespaces
-/// per namespace. Note that per-namespace uniqueness needs to also hold for keys *and*
-/// namespaces/sub-namespaces in any given namespace/sub-namespace, i.e., conflicts between keys
-/// and equally named namespaces/sub-namespaces must be avoided.
+/// primary namespaces and secondary namespaces (`""`) are assumed to be valid. However, if
+/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
+/// that concerns should always be separated by primary namespace first, before secondary
+/// namespaces are used. While the number of primary namespaces will be relatively small and is
+/// determined at compile time, there may be many secondary namespaces per primary namespace. Note
+/// that per-namespace uniqueness must also hold for keys *and* namespaces in any given
+/// namespace, i.e., conflicts between keys and equally named primary or secondary namespaces
+/// must be avoided.
 ///
 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
-/// interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the
-/// data model previously assumed by `KVStorePersister::persist`.
+/// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
+/// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
 pub trait KVStore {
-       /// Returns the data stored for the given `namespace`, `sub_namespace`, and `key`.
+       /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
+       /// `key`.
        ///
        /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
-       /// `namespace` and `sub_namespace`.
+       /// `primary_namespace` and `secondary_namespace`.
        ///
        /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
-       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>>;
+       fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
        /// Persists the given data under the given `key`.
        ///
-       /// Will create the given `namespace` and `sub_namespace` if not already present in the store.
-       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()>;
+       /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
+       /// in the store.
+       fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
        /// Removes any data that had previously been persisted under the given `key`.
        ///
        /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
@@ -104,14 +118,16 @@ pub trait KVStore {
        /// potentially get lost on crash after the method returns. Therefore, this flag should only be
        /// set for `remove` operations that can be safely replayed at a later time.
        ///
-       /// Returns successfully if no data will be stored for the given `namespace`, `sub_namespace`, and
-       /// `key`, independently of whether it was present before its invokation or not.
-       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()>;
-       /// Returns a list of keys that are stored under the given `sub_namespace` in `namespace`.
+       /// Returns successfully if no data will be stored for the given `primary_namespace`,
+       /// `secondary_namespace`, and `key`, independently of whether it was present before its
+       /// invocation or not.
+       fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
+       /// Returns a list of keys that are stored under the given `secondary_namespace` in
+       /// `primary_namespace`.
        ///
        /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
-       /// returned keys. Returns an empty list if `namespace` or `sub_namespace` is unknown.
-       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>>;
+       /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
+       fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
 }
 
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
@@ -148,26 +164,26 @@ impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Der
 {
        /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
-               self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
-                                  CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
-                                  CHANNEL_MANAGER_PERSISTENCE_KEY,
-                                  &channel_manager.encode())
+               self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+                       CHANNEL_MANAGER_PERSISTENCE_KEY,
+                       &channel_manager.encode())
        }
 
        /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
-               self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
-                                  NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
-                                  NETWORK_GRAPH_PERSISTENCE_KEY,
-                                  &network_graph.encode())
+               self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+                       NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+                       NETWORK_GRAPH_PERSISTENCE_KEY,
+                       &network_graph.encode())
        }
 
        /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
-               self.write(SCORER_PERSISTENCE_NAMESPACE,
-                                  SCORER_PERSISTENCE_SUB_NAMESPACE,
-                                  SCORER_PERSISTENCE_KEY,
-                                  &scorer.encode())
+               self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
+                       SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
+                       SCORER_PERSISTENCE_KEY,
+                       &scorer.encode())
        }
 }
 
@@ -180,8 +196,8 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
                match self.write(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                        &key, &monitor.encode())
                {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
@@ -192,8 +208,8 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
                match self.write(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                        &key, &monitor.encode())
                {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
@@ -205,7 +221,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
 /// Read previously persisted [`ChannelMonitor`]s from the store.
 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
        kv_store: K, entropy_source: ES, signer_provider: SP,
-) -> io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
+) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
 where
        K::Target: KVStore,
        ES::Target: EntropySource + Sized,
@@ -214,7 +230,7 @@ where
        let mut res = Vec::new();
 
        for stored_key in kv_store.list(
-               CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
+               CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
        {
                if stored_key.len() < 66 {
                        return Err(io::Error::new(
@@ -232,7 +248,7 @@ where
 
                match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
                        &mut io::Cursor::new(
-                               kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
+                               kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
                        (&*entropy_source, &*signer_provider),
                ) {
                        Ok((block_hash, channel_monitor)) => {
@@ -249,10 +265,939 @@ where
                        Err(_) => {
                                return Err(io::Error::new(
                                        io::ErrorKind::InvalidData,
-                                       "Failed to deserialize ChannelMonitor"
+                                       "Failed to read ChannelMonitor"
                                ))
                        }
                }
        }
        Ok(res)
 }
+
+/// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
+/// [`ChannelMonitorUpdate`]s.
+///
+/// # Overview
+///
+/// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
+/// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
+/// deleting) and complexity. This is because it writes channel monitor differential updates,
+/// whereas the other (default) implementation rewrites the entire monitor on each update. For
+/// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
+/// of megabytes (or more). Updates can be as small as a few hundred bytes.
+///
+/// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
+/// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
+/// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
+/// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
+/// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
+/// sentinel bytes.
+///
+/// # Storing monitors
+///
+/// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
+///
+///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
+///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
+///
+/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
+/// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
+///
+/// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
+///
+///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
+///   - secondary namespace: [the monitor's encoded outpoint name]
+///
+/// Under that secondary namespace, each update is stored with a number string, like `21`, which
+/// represents its `update_id` value.
+///
+/// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
+///
+///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
+///   - Index: `1`
+///
+/// Full channel monitors would be stored at a single key:
+///
+/// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
+///
+/// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
+///
+/// ```text
+/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
+/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
+/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
+/// ```
+/// ... and so on.
+///
+/// # Reading channel state from storage
+///
+/// Channel state can be reconstructed by calling
+/// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
+/// list channel monitors themselves and load channels individually using
+/// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
+/// 
+/// ## EXTREMELY IMPORTANT
+/// 
+/// It is extremely important that your [`KVStore::read`] implementation uses the
+/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
+/// that circumstance (not when there is really a permissions error, for example). This is because
+/// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
+/// using its stored `update_id`, synthesizes update storage keys, trying them in sequence until
+/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
+///
+/// # Pruning stale channel updates
+///
+/// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
+/// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
+/// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
+/// will complete. However, stale updates are not a problem for data integrity, since only
+/// updates with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
+///
+/// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
+/// would like to get rid of them, consider using the
+/// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
+pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
+where
+       K::Target: KVStore,
+       L::Target: Logger,
+       ES::Target: EntropySource + Sized,
+       SP::Target: SignerProvider + Sized,
+{
+       kv_store: K,
+       logger: L,
+       maximum_pending_updates: u64,
+       entropy_source: ES,
+       signer_provider: SP,
+}
+
+#[allow(dead_code)]
+impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
+       MonitorUpdatingPersister<K, L, ES, SP>
+where
+       K::Target: KVStore,
+       L::Target: Logger,
+       ES::Target: EntropySource + Sized,
+       SP::Target: SignerProvider + Sized,
+{
+       /// Constructs a new [`MonitorUpdatingPersister`].
+       ///
+       /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
+       /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
+       /// consolidation will frequently occur with fewer updates than what you set here; this number
+       /// is merely the maximum that may be stored. When setting this value, consider that for higher
+       /// values of `maximum_pending_updates`:
+       /// 
+       ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
+       /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
+       /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
+       ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
+       /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
+       /// less frequent "waves."
+       ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
+       /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
+       pub fn new(
+               kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
+               signer_provider: SP,
+       ) -> Self
+       where
+               ES::Target: EntropySource + Sized,
+               SP::Target: SignerProvider + Sized,
+       {
+               MonitorUpdatingPersister {
+                       kv_store,
+                       logger,
+                       maximum_pending_updates,
+                       entropy_source,
+                       signer_provider,
+               }
+       }
+
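A hedged construction sketch; `my_store`, `logger`, and `keys_manager` are placeholder bindings for your own `KVStore`, `Logger`, and `KeysManager` (the latter implements both `EntropySource` and `SignerProvider`):

```rust
// Hypothetical setup; none of these bindings are provided by LDK itself.
let persister = MonitorUpdatingPersister::new(
	&my_store,     // K: your KVStore implementation
	&logger,       // L: your Logger implementation
	100,           // maximum_pending_updates: a full monitor write roughly every 100 updates
	&keys_manager, // ES: EntropySource
	&keys_manager, // SP: SignerProvider
);
```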
+       /// Reads all stored channel monitors, along with any stored updates for them.
+       ///
+       /// It is extremely important that your [`KVStore::read`] implementation uses the
+       /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
+       /// documentation for [`MonitorUpdatingPersister`].
+       pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
+               &self, broadcaster: B, fee_estimator: F,
+       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+       where
+               ES::Target: EntropySource + Sized,
+               SP::Target: SignerProvider + Sized,
+               B::Target: BroadcasterInterface,
+               F::Target: FeeEstimator,
+       {
+               let monitor_list = self.kv_store.list(
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+               )?;
+               let mut res = Vec::with_capacity(monitor_list.len());
+               for monitor_key in monitor_list {
+                       res.push(self.read_channel_monitor_with_updates(
+                               &broadcaster,
+                               fee_estimator.clone(),
+                               monitor_key,
+                       )?)
+               }
+               Ok(res)
+       }
+
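A hedged startup sketch, assuming `persister` from the construction example above and your own `broadcaster`/`fee_estimator` implementations of `BroadcasterInterface`/`FeeEstimator`:

```rust
// Read every stored monitor, replaying any pending updates on top of each.
let monitors = persister.read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)?;
for (best_block_hash, monitor) in monitors {
	// Hand each (BlockHash, ChannelMonitor) pair to your ChainMonitor/ChannelManager
	// startup path, just as with monitors read by any other persister.
}
```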
+       /// Read a single channel monitor, along with any stored updates for it.
+       ///
+       /// It is extremely important that your [`KVStore::read`] implementation uses the
+       /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
+       /// documentation for [`MonitorUpdatingPersister`].
+       ///
+       /// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
+       /// [`OutPoint`], with an underscore `_` between them. For example, given:
+       ///
+       ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
+       ///   - Index: `1`
+       ///
+       /// The correct `monitor_key` would be:
+       /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
+       ///
+       /// Loading a large number of monitors will be faster if done in parallel; this function can
+       /// be called concurrently for different keys to accomplish that. Take care to limit the
+       /// number of parallel readers.
+       pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
+               &self, broadcaster: &B, fee_estimator: F, monitor_key: String,
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
+       where
+               ES::Target: EntropySource + Sized,
+               SP::Target: SignerProvider + Sized,
+               B::Target: BroadcasterInterface,
+               F::Target: FeeEstimator,
+       {
+               let monitor_name = MonitorName::new(monitor_key)?;
+               let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
+               let mut current_update_id = monitor.get_latest_update_id();
+               loop {
+                       current_update_id = match current_update_id.checked_add(1) {
+                               Some(next_update_id) => next_update_id,
+                               None => break,
+                       };
+                       let update_name = UpdateName::from(current_update_id);
+                       let update = match self.read_monitor_update(&monitor_name, &update_name) {
+                               Ok(update) => update,
+                               Err(err) if err.kind() == io::ErrorKind::NotFound => {
+                                       // We can't find any more updates, so we are done.
+                                       break;
+                               }
+                               Err(err) => return Err(err),
+                       };
+
+                       monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
+                               .map_err(|e| {
+                                       log_error!(
+                                               self.logger,
+                                               "Monitor update failed. monitor: {} update: {} reason: {:?}",
+                                               monitor_name.as_str(),
+                                               update_name.as_str(),
+                                               e
+                                       );
+                                       io::Error::new(io::ErrorKind::Other, "Monitor update failed")
+                               })?;
+               }
+               Ok((block_hash, monitor))
+       }
+
+       /// Read a channel monitor.
+       fn read_monitor(
+               &self, monitor_name: &MonitorName,
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
+               let outpoint: OutPoint = monitor_name.try_into()?;
+               let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+                       monitor_name.as_str(),
+               )?);
+               // Discard the sentinel bytes if found.
+               if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
+                       monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
+               }
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+                       &mut monitor_cursor,
+                       (&*self.entropy_source, &*self.signer_provider),
+               ) {
+                       Ok((blockhash, channel_monitor)) => {
+                               if channel_monitor.get_funding_txo().0.txid != outpoint.txid
+                                       || channel_monitor.get_funding_txo().0.index != outpoint.index
+                               {
+                                       log_error!(
+                                               self.logger,
+                                               "ChannelMonitor {} was stored under the wrong key!",
+                                               monitor_name.as_str()
+                                       );
+                                       Err(io::Error::new(
+                                               io::ErrorKind::InvalidData,
+                                               "ChannelMonitor was stored under the wrong key",
+                                       ))
+                               } else {
+                                       Ok((blockhash, channel_monitor))
+                               }
+                       }
+                       Err(e) => {
+                               log_error!(
+                                       self.logger,
+                                       "Failed to read ChannelMonitor {}, reason: {}",
+                                       monitor_name.as_str(),
+                                       e,
+                               );
+                               Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
+                       }
+               }
+       }
+
+       /// Read a channel monitor update.
+       fn read_monitor_update(
+               &self, monitor_name: &MonitorName, update_name: &UpdateName,
+       ) -> Result<ChannelMonitorUpdate, io::Error> {
+               let update_bytes = self.kv_store.read(
+                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                       monitor_name.as_str(),
+                       update_name.as_str(),
+               )?;
+               ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
+                       log_error!(
+                               self.logger,
+                               "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
+                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                               monitor_name.as_str(),
+                               update_name.as_str(),
+                               e,
+                       );
+                       io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
+               })
+       }
+
+       /// Cleans up stale updates for all monitors.
+       ///
+       /// This function works by first listing all monitors, and then for each of them, listing all
+       /// updates. The updates that have an `update_id` less than or equal to that of the stored monitor
+       /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
+       /// be passed to [`KVStore::remove`].
+       pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
+               let monitor_keys = self.kv_store.list(
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+               )?;
+               for monitor_key in monitor_keys {
+                       let monitor_name = MonitorName::new(monitor_key)?;
+                       let (_, current_monitor) = self.read_monitor(&monitor_name)?;
+                       let updates = self
+                               .kv_store
+                               .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
+                       for update in updates {
+                               let update_name = UpdateName::new(update)?;
+                               // if the update_id is lower than the stored monitor, delete
+                               if update_name.0 <= current_monitor.get_latest_update_id() {
+                                       self.kv_store.remove(
+                                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                                               monitor_name.as_str(),
+                                               update_name.as_str(),
+                                               lazy,
+                                       )?;
+                               }
+                       }
+               }
+               Ok(())
+       }
+}
+
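A hedged sketch of running this pruning pass, e.g. on startup after a crash may have left lazily-deleted updates behind (`persister` as constructed above):

```rust
// `true` passes the lazy flag through to KVStore::remove; use `false` to require
// deletions to complete before this call returns.
persister.cleanup_stale_updates(true)?;
```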
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref> 
+       Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
+where
+       K::Target: KVStore,
+       L::Target: Logger,
+       ES::Target: EntropySource + Sized,
+       SP::Target: SignerProvider + Sized,
+{
+       /// Persists a new channel. This means writing the entire monitor to the
+       /// parameterized [`KVStore`].
+       fn persist_new_channel(
+               &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
+               _monitor_update_call_id: MonitorUpdateId,
+       ) -> chain::ChannelMonitorUpdateStatus {
+               // Determine the proper key for this monitor
+               let monitor_name = MonitorName::from(funding_txo);
+               let maybe_old_monitor = self.read_monitor(&monitor_name);
+               match maybe_old_monitor {
+                       Ok((_, ref old_monitor)) => {
+                               // Check that this key isn't already storing a monitor with a higher update_id
+                               // (collision)
+                               if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
+                                       log_error!(
+                                               self.logger,
+                                               "Tried to write a monitor at the same outpoint {} with a higher update_id!",
+                                               monitor_name.as_str()
+                                       );
+                                       return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
+                               }
+                       }
+                       // This means the channel monitor is new.
+                       Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
+                       _ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
+               }
+               // Serialize and write the new monitor
+               let mut monitor_bytes = Vec::with_capacity(
+                       MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
+               );
+               monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
+               monitor.write(&mut monitor_bytes).unwrap();
+               match self.kv_store.write(
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+                       monitor_name.as_str(),
+                       &monitor_bytes,
+               ) {
+                       Ok(_) => {
+                               // Assess cleanup. Typically, we'll clean up only between the last two known full
+                               // monitors.
+                               if let Ok((_, old_monitor)) = maybe_old_monitor {
+                                       let start = old_monitor.get_latest_update_id();
+                                       let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
+                                               // We don't want to clean the rest of u64, so just do possible pending
+                                               // updates. Note that we never write updates at
+                                               // `CLOSED_CHANNEL_UPDATE_ID`.
+                                               cmp::min(
+                                                       start.saturating_add(self.maximum_pending_updates),
+                                                       CLOSED_CHANNEL_UPDATE_ID - 1,
+                                               )
+                                       } else {
+                                               monitor.get_latest_update_id().saturating_sub(1)
+                                       };
+                                       // We should bother cleaning up only if there's at least one update
+                                       // expected.
+                                       for update_id in start..=end {
+                                               let update_name = UpdateName::from(update_id);
+                                               #[cfg(debug_assertions)]
+                                               {
+                                                       if let Ok(update) =
+                                                               self.read_monitor_update(&monitor_name, &update_name)
+                                                       {
+                                                               // Assert that we are reading what we think we are.
+                                                               debug_assert_eq!(update.update_id, update_name.0);
+                                                       } else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
+                                                       {
+                                                               // We're deleting something we should know doesn't exist.
+                                                               panic!(
+                                                                       "failed to read monitor update {}",
+                                                                       update_name.as_str()
+                                                               );
+                                                       }
+                                                       // On closed channels, we will unavoidably try to read
+                                                       // non-existent updates since we have to guess at the range of
+                                                       // stale updates, so do nothing.
+                                               }
+                                               if let Err(e) = self.kv_store.remove(
+                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                                                       monitor_name.as_str(),
+                                                       update_name.as_str(),
+                                                       true,
+                                               ) {
+                                                       log_error!(
+                                                               self.logger,
+                                                               "error cleaning up channel monitor updates for monitor {}, reason: {}",
+                                                               monitor_name.as_str(),
+                                                               e
+                                                       );
+                                               };
+                                       }
+                               };
+                               chain::ChannelMonitorUpdateStatus::Completed
+                       }
+                       Err(e) => {
+                               log_error!(
+                                       self.logger,
+                                       "error writing channel monitor {}/{}/{} reason: {}",
+                                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+                                       monitor_name.as_str(),
+                                       e
+                               );
+                               chain::ChannelMonitorUpdateStatus::UnrecoverableError
+                       }
+               }
+       }
+
+       /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
+       ///
+       /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
+       ///
+       ///   - No full monitor is found in [`KVStore`]
+       ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
+       ///   - LDK commands re-persisting the entire monitor through this function, specifically when
+       ///     `update` is `None`.
+       ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
+       fn update_persisted_channel(
+               &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
+               monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
+       ) -> chain::ChannelMonitorUpdateStatus {
+               // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
+               // ChannelMonitorUpdate's update_id.
+               if let Some(update) = update {
+                       if update.update_id != CLOSED_CHANNEL_UPDATE_ID
+                               && update.update_id % self.maximum_pending_updates != 0
+                       {
+                               let monitor_name = MonitorName::from(funding_txo);
+                               let update_name = UpdateName::from(update.update_id);
+                               match self.kv_store.write(
+                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                                       monitor_name.as_str(),
+                                       update_name.as_str(),
+                                       &update.encode(),
+                               ) {
+                                       Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
+                                       Err(e) => {
+                                               log_error!(
+                                                       self.logger,
+                                                       "error writing channel monitor update {}/{}/{} reason: {}",
+                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                                                       monitor_name.as_str(),
+                                                       update_name.as_str(),
+                                                       e
+                                               );
+                                               chain::ChannelMonitorUpdateStatus::UnrecoverableError
+                                       }
+                               }
+                       } else {
+                               // We could write this update, but it meets the criteria of our design that call for a full monitor write.
+                               self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+                       }
+               } else {
+                       // There is no update given, so we must persist a new monitor.
+                       self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+               }
+       }
+}
+
+/// A struct representing a name for a channel monitor, in the form
+/// `<funding_txid>_<funding_output_index>`.
+#[derive(Debug)]
+struct MonitorName(String);
+
+impl MonitorName {
+       /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
+       /// be formed from the given `name`.
+       pub fn new(name: String) -> Result<Self, io::Error> {
+               MonitorName::do_try_into_outpoint(&name)?;
+               Ok(Self(name))
+       }
+       /// Convert this monitor name to a str.
+       pub fn as_str(&self) -> &str {
+               &self.0
+       }
+       /// Attempt to form a valid [`OutPoint`] from a given name string.
+       fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
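+               // Monitor names take the form `<funding_txid>_<funding_output_index>`; split on the
+               // first underscore.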
+               let mut parts = name.splitn(2, '_');
+               let txid = if let Some(part) = parts.next() {
+                       Txid::from_hex(part).map_err(|_| {
+                               io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
+                       })?
+               } else {
+                       return Err(io::Error::new(
+                               io::ErrorKind::InvalidData,
+                               "Stored monitor key is not a splittable string",
+                       ));
+               };
+               let index = if let Some(part) = parts.next() {
+                       part.parse().map_err(|_| {
+                               io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
+                       })?
+               } else {
+                       return Err(io::Error::new(
+                               io::ErrorKind::InvalidData,
+                               "No tx index value found after underscore in stored key",
+                       ));
+               };
+               Ok(OutPoint { txid, index })
+       }
+}
+
+impl TryFrom<&MonitorName> for OutPoint {
+       type Error = io::Error;
+
+       fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
+               MonitorName::do_try_into_outpoint(&value.0)
+       }
+}
+
+impl From<OutPoint> for MonitorName {
+       fn from(value: OutPoint) -> Self {
+               MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
+       }
+}
+
+/// A struct representing a name for a channel monitor update, pairing an update's `update_id`
+/// with the decimal string form used as its storage key.
+#[derive(Debug)]
+struct UpdateName(u64, String);
+
+impl UpdateName {
+       /// Constructs an [`UpdateName`], after verifying that an update sequence ID
+       /// can be derived from the given `name`.
+       pub fn new(name: String) -> Result<Self, io::Error> {
+               match name.parse::<u64>() {
+                       Ok(u) => Ok(u.into()),
+                       Err(_) => {
+                               Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
+                       }
+               }
+       }
+
+       /// Convert this monitor update name to a `&str`.
+       pub fn as_str(&self) -> &str {
+               &self.1
+       }
+}
+
+impl From<u64> for UpdateName {
+       fn from(value: u64) -> Self {
+               Self(value, value.to_string())
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use super::*;
+       use crate::chain::chainmonitor::Persist;
+       use crate::chain::ChannelMonitorUpdateStatus;
+       use crate::events::{ClosureReason, MessageSendEventsProvider};
+       use crate::ln::functional_test_utils::*;
+       use crate::util::test_utils::{self, TestLogger, TestStore};
+       use crate::{check_added_monitors, check_closed_broadcast};
+
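+       // The number of `ChannelMonitorUpdate`s a full payment send-and-claim cycle produces per
+       // node, i.e. how far a monitor's `update_id` advances with each test payment.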
+       const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
+
+       #[test]
+       fn converts_u64_to_update_name() {
+               assert_eq!(UpdateName::from(0).as_str(), "0");
+               assert_eq!(UpdateName::from(21).as_str(), "21");
+               assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
+       }
+
+       #[test]
+       fn bad_update_name_fails() {
+               assert!(UpdateName::new("deadbeef".to_string()).is_err());
+               assert!(UpdateName::new("-1".to_string()).is_err());
+       }
+
+       #[test]
+       fn monitor_from_outpoint_works() {
+               let monitor_name1 = MonitorName::from(OutPoint {
+                       txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
+                       index: 1,
+               });
+               assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
+
+               let monitor_name2 = MonitorName::from(OutPoint {
+                       txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
+                       index: u16::MAX,
+               });
+               assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
+       }
+
+       #[test]
+       fn bad_monitor_string_fails() {
+               assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
+               assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
+               assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
+       }
+
+       // Exercise the `MonitorUpdatingPersister` with real channels and payments.
+       #[test]
+       fn persister_with_real_monitors() {
+               // This is persister_0's `maximum_pending_updates`; it also bounds how many payment
+               // iterations we perform below.
+               let test_max_pending_updates = 7;
+               let chanmon_cfgs = create_chanmon_cfgs(4);
+               let persister_0 = MonitorUpdatingPersister {
+                       kv_store: &TestStore::new(false),
+                       logger: &TestLogger::new(),
+                       maximum_pending_updates: test_max_pending_updates,
+                       entropy_source: &chanmon_cfgs[0].keys_manager,
+                       signer_provider: &chanmon_cfgs[0].keys_manager,
+               };
+               let persister_1 = MonitorUpdatingPersister {
+                       kv_store: &TestStore::new(false),
+                       logger: &TestLogger::new(),
+                       // Intentionally set this to a smaller value to test a different alignment.
+                       maximum_pending_updates: 3,
+                       entropy_source: &chanmon_cfgs[1].keys_manager,
+                       signer_provider: &chanmon_cfgs[1].keys_manager,
+               };
+               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let chain_mon_0 = test_utils::TestChainMonitor::new(
+                       Some(&chanmon_cfgs[0].chain_source),
+                       &chanmon_cfgs[0].tx_broadcaster,
+                       &chanmon_cfgs[0].logger,
+                       &chanmon_cfgs[0].fee_estimator,
+                       &persister_0,
+                       &chanmon_cfgs[0].keys_manager,
+               );
+               let chain_mon_1 = test_utils::TestChainMonitor::new(
+                       Some(&chanmon_cfgs[1].chain_source),
+                       &chanmon_cfgs[1].tx_broadcaster,
+                       &chanmon_cfgs[1].logger,
+                       &chanmon_cfgs[1].fee_estimator,
+                       &persister_1,
+                       &chanmon_cfgs[1].keys_manager,
+               );
+               node_cfgs[0].chain_monitor = chain_mon_0;
+               node_cfgs[1].chain_monitor = chain_mon_1;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
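+               // The two spare node configs (indices 2 and 3) lend their broadcasters so monitors
+               // can be read back independently of the live nodes.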
+               let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
+               let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
+
+               // Check that the persisted channel data is empty before any channels are
+               // open.
+               let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
+                       broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               assert_eq!(persisted_chan_data_0.len(), 0);
+               let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
+                       broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+               assert_eq!(persisted_chan_data_1.len(), 0);
+
+               // Helper to make sure the channel is on the expected update ID.
+               macro_rules! check_persisted_data {
+                       ($expected_update_id: expr) => {
+                               persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
+                                       broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+                               // check that we stored only one monitor
+                               assert_eq!(persisted_chan_data_0.len(), 1);
+                               for (_, mon) in persisted_chan_data_0.iter() {
+                                       // check that when we read it, we got the right update id
+                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+                                       // if the monitor read on its own (without applying updates) is already at the
+                                       // expected update id, ensure no update records are stored
+                                       let monitor_name = MonitorName::from(mon.get_funding_txo().0);
+                                       let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
+                                       if cm_0.get_latest_update_id() == $expected_update_id {
+                                               assert_eq!(
+                                                       persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                                                               monitor_name.as_str()).unwrap().len(),
+                                                       0,
+                                                       "updates stored when they shouldn't be in persister 0"
+                                               );
+                                       }
+                               }
+                               persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
+                                       broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+                               assert_eq!(persisted_chan_data_1.len(), 1);
+                               for (_, mon) in persisted_chan_data_1.iter() {
+                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+                                       let monitor_name = MonitorName::from(mon.get_funding_txo().0);
+                                       let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
+                                       if cm_1.get_latest_update_id() == $expected_update_id {
+                                               assert_eq!(
+                                                       persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                                                               monitor_name.as_str()).unwrap().len(),
+                                                       0,
+                                                       "updates stored when they shouldn't be in persister 1"
+                                               );
+                                       }
+                               }
+                       };
+               }
+
+               // Create an initial channel and check that its monitor was persisted.
+               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+               check_persisted_data!(0);
+
+               // Send a few payments and make sure the monitors are updated to the latest.
+               send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
+               check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
+               send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
+               check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
+
+               // Send a few more payments to try all the alignments of max pending updates with
+               // updates for a payment sent and received.
+               let mut sender = 0;
+               for i in 3..=test_max_pending_updates * 2 {
+                       let receiver;
+                       if sender == 0 {
+                               sender = 1;
+                               receiver = 0;
+                       } else {
+                               sender = 0;
+                               receiver = 1;
+                       }
+                       send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
+                       check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
+               }
+
+               // Force close because cooperative close doesn't result in any persisted
+               // updates.
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+
+               check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+               check_closed_broadcast!(nodes[0], true);
+               check_added_monitors!(nodes[0], 1);
+
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(node_txn.len(), 1);
+
+               connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
+
+               check_closed_broadcast!(nodes[1], true);
+               check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+               check_added_monitors!(nodes[1], 1);
+
+               // Make sure everything is persisted as expected after close.
+               check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
+
+               // Make sure no stale updates remain after the closing full monitor write.
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let (_, monitor) = &persisted_chan_data[0];
+               let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
+               // The channel should have 0 updates, as it wrote a full monitor and consolidated.
+               assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
+               assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
+       }
+
+       // Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to
+       // persist a monitor or update with it results in the persister returning an
+       // `UnrecoverableError` status.
+       #[test]
+       fn unrecoverable_error_on_write_failure() {
+               // Set up a dummy channel and force close. This will produce a monitor
+               // that we can then use to test persistence.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+               {
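+                       // Grab the force-closed channel's monitor, its latest monitor update ID, and a
+                       // real `ChannelMonitorUpdate` to replay against a read-only persister.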
+                       let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+                       let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+                       let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+                       let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
+                       let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
+                       let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
+
+                       let ro_persister = MonitorUpdatingPersister {
+                               kv_store: &TestStore::new(true),
+                               logger: &TestLogger::new(),
+                               maximum_pending_updates: 11,
+                               entropy_source: node_cfgs[0].keys_manager,
+                               signer_provider: node_cfgs[0].keys_manager,
+                       };
+                       match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+                               ChannelMonitorUpdateStatus::UnrecoverableError => {
+                                       // correct result
+                               }
+                               ChannelMonitorUpdateStatus::Completed => {
+                                       panic!("Completed persisting new channel when shouldn't have")
+                               }
+                               ChannelMonitorUpdateStatus::InProgress => {
+                                       panic!("Returned InProgress when shouldn't have")
+                               }
+                       }
+                       match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
+                               ChannelMonitorUpdateStatus::UnrecoverableError => {
+                                       // correct result
+                               }
+                               ChannelMonitorUpdateStatus::Completed => {
+                                       panic!("Completed persisting new channel when shouldn't have")
+                               }
+                               ChannelMonitorUpdateStatus::InProgress => {
+                                       panic!("Returned InProgress when shouldn't have")
+                               }
+                       }
+                       added_monitors.clear();
+               }
+               nodes[1].node.get_and_clear_pending_msg_events();
+       }
+
+       // Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
+       #[test]
+       fn clean_stale_updates_works() {
+               let test_max_pending_updates = 7;
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let persister_0 = MonitorUpdatingPersister {
+                       kv_store: &TestStore::new(false),
+                       logger: &TestLogger::new(),
+                       maximum_pending_updates: test_max_pending_updates,
+                       entropy_source: &chanmon_cfgs[0].keys_manager,
+                       signer_provider: &chanmon_cfgs[0].keys_manager,
+               };
+               let persister_1 = MonitorUpdatingPersister {
+                       kv_store: &TestStore::new(false),
+                       logger: &TestLogger::new(),
+                       maximum_pending_updates: test_max_pending_updates,
+                       entropy_source: &chanmon_cfgs[1].keys_manager,
+                       signer_provider: &chanmon_cfgs[1].keys_manager,
+               };
+               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let chain_mon_0 = test_utils::TestChainMonitor::new(
+                       Some(&chanmon_cfgs[0].chain_source),
+                       &chanmon_cfgs[0].tx_broadcaster,
+                       &chanmon_cfgs[0].logger,
+                       &chanmon_cfgs[0].fee_estimator,
+                       &persister_0,
+                       &chanmon_cfgs[0].keys_manager,
+               );
+               let chain_mon_1 = test_utils::TestChainMonitor::new(
+                       Some(&chanmon_cfgs[1].chain_source),
+                       &chanmon_cfgs[1].tx_broadcaster,
+                       &chanmon_cfgs[1].logger,
+                       &chanmon_cfgs[1].fee_estimator,
+                       &persister_1,
+                       &chanmon_cfgs[1].keys_manager,
+               );
+               node_cfgs[0].chain_monitor = chain_mon_0;
+               node_cfgs[1].chain_monitor = chain_mon_1;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
+
+               // Check that the persisted channel data is empty before any channels are
+               // open.
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               assert_eq!(persisted_chan_data.len(), 0);
+
+               // Create an initial channel.
+               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               // Send a few payments to advance the updates a bit
+               send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
+               send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
+
+               // Get the monitor and write a fake stale update at update_id=1 (the lowest possible update ID).
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let (_, monitor) = &persisted_chan_data[0];
+               let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
+               persister_0
+                       .kv_store
+                       .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
+                       .unwrap();
+
+               // Do the stale update cleanup
+               persister_0.cleanup_stale_updates(false).unwrap();
+
+               // Confirm the stale update is unreadable/gone
+               assert!(persister_0
+                       .kv_store
+                       .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
+                       .is_err());
+
+               // Force close.
+               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+               check_closed_broadcast!(nodes[0], true);
+               check_added_monitors!(nodes[0], 1);
+
+               // Write an update near u64::MAX
+               persister_0
+                       .kv_store
+                       .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
+                       .unwrap();
+
+               // Do the stale update cleanup
+               persister_0.cleanup_stale_updates(false).unwrap();
+
+               // Confirm the stale update is unreadable/gone
+               assert!(persister_0
+                       .kv_store
+                       .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
+                       .is_err());
+       }
+}
index d53cd39b119fac08d8ebb151204d9c4e53a2a4a3..18c3db76f18efac38cb0ebf529f42c32b7ef4757 100644 (file)
@@ -124,6 +124,7 @@ impl<'a> Router for TestRouter<'a> {
                if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
                        assert_eq!(find_route_query, *params);
                        if let Ok(ref route) = find_route_res {
+                               assert_eq!(route.route_params.as_ref().unwrap().final_value_msat, find_route_query.final_value_msat);
                                let scorer = self.scorer.read().unwrap();
                                let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
                                for path in &route.paths {
@@ -440,12 +441,12 @@ impl TestStore {
 }
 
 impl KVStore for TestStore {
-       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
+       fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
                let persisted_lock = self.persisted_bytes.lock().unwrap();
-               let prefixed = if sub_namespace.is_empty() {
-                       namespace.to_string()
+               let prefixed = if secondary_namespace.is_empty() {
+                       primary_namespace.to_string()
                } else {
-                       format!("{}/{}", namespace, sub_namespace)
+                       format!("{}/{}", primary_namespace, secondary_namespace)
                };
 
                if let Some(outer_ref) = persisted_lock.get(&prefixed) {
@@ -460,7 +461,7 @@ impl KVStore for TestStore {
                }
        }
 
-       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
+       fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
                if self.read_only {
                        return Err(io::Error::new(
                                io::ErrorKind::PermissionDenied,
@@ -469,10 +470,10 @@ impl KVStore for TestStore {
                }
                let mut persisted_lock = self.persisted_bytes.lock().unwrap();
 
-               let prefixed = if sub_namespace.is_empty() {
-                       namespace.to_string()
+               let prefixed = if secondary_namespace.is_empty() {
+                       primary_namespace.to_string()
                } else {
-                       format!("{}/{}", namespace, sub_namespace)
+                       format!("{}/{}", primary_namespace, secondary_namespace)
                };
                let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
                let mut bytes = Vec::new();
@@ -481,7 +482,7 @@ impl KVStore for TestStore {
                Ok(())
        }
 
-       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
+       fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
                if self.read_only {
                        return Err(io::Error::new(
                                io::ErrorKind::PermissionDenied,
@@ -491,10 +492,10 @@ impl KVStore for TestStore {
 
                let mut persisted_lock = self.persisted_bytes.lock().unwrap();
 
-               let prefixed = if sub_namespace.is_empty() {
-                       namespace.to_string()
+               let prefixed = if secondary_namespace.is_empty() {
+                       primary_namespace.to_string()
                } else {
-                       format!("{}/{}", namespace, sub_namespace)
+                       format!("{}/{}", primary_namespace, secondary_namespace)
                };
                if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
                                outer_ref.remove(&key.to_string());
@@ -503,13 +504,13 @@ impl KVStore for TestStore {
                Ok(())
        }
 
-       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
+       fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
                let mut persisted_lock = self.persisted_bytes.lock().unwrap();
 
-               let prefixed = if sub_namespace.is_empty() {
-                       namespace.to_string()
+               let prefixed = if secondary_namespace.is_empty() {
+                       primary_namespace.to_string()
                } else {
-                       format!("{}/{}", namespace, sub_namespace)
+                       format!("{}/{}", primary_namespace, secondary_namespace)
                };
                match persisted_lock.entry(prefixed) {
                        hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
index d96fd69371b32bef578fe7a0d7bd143fa864df5c..3fe949500e6ecd16852023a88eac96bb43d21441 100644 (file)
@@ -1,3 +1,3 @@
 ## Backwards Compatibility
 
-* Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
+* Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
diff --git a/pending_changelog/monitorupdatingpersister.txt b/pending_changelog/monitorupdatingpersister.txt
new file mode 100644 (file)
index 0000000..24d63ff
--- /dev/null
@@ -0,0 +1,5 @@
+## Backwards Compatibility
+
+* The `MonitorUpdatingPersister` can read monitors stored conventionally, such as with the `KVStorePersister` from previous LDK versions, so migrating _to_ the `MonitorUpdatingPersister` is simple: point it at existing, fully-updated `ChannelMonitor`s and it will read them and work from there. Downgrading is more complex. Monitors stored with the `MonitorUpdatingPersister` have a prepended sentinel value that prevents them from being deserialized by previous LDK versions. This ensures they are not accidentally read and used while pending updates are still stored but not yet applied, as that could result in penalty transactions. Users who wish to downgrade should perform the following steps:
+  * Make a backup copy of all channel state.
+  * Ensure all updates are applied to the monitors. This may be done by loading all the existing data with the `MonitorUpdatingPersister::read_all_channel_monitors_with_updates` function. You can then write the resulting fully-updated `ChannelMonitor`s using your previous `Persist` implementation (see the sketch below).
\ No newline at end of file
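
A minimal sketch of that downgrade flow, assuming the v0.0.117 `lightning::util::persist` API. The `kv_store`, `logger`, `entropy_source`, `signer_provider`, `broadcaster`, and `fee_estimator` bindings stand in for your application's own instances (e.g. `Arc`s) and are not part of this change; the `maximum_pending_updates` value of 100 is arbitrary here.

```rust
// Illustrative downgrade sketch only; the lowercase bindings below are assumed to be
// your application's existing KVStore, Logger, EntropySource, SignerProvider,
// BroadcasterInterface, and FeeEstimator instances.
use lightning::util::persist::{
    KVStore, MonitorUpdatingPersister,
    CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
    CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
};
use lightning::util::ser::Writeable;

let persister = MonitorUpdatingPersister::new(
    kv_store, logger, /* maximum_pending_updates */ 100, entropy_source, signer_provider,
);
// Step 1: read every monitor, applying all of its pending stored updates.
let monitors = persister
    .read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)
    .expect("failed to read channel monitors");
// Step 2: write each fully-updated monitor back in the conventional format (no
// sentinel prefix), so that pre-0.0.117-style persistence can deserialize it.
for (_best_block_hash, monitor) in monitors.iter() {
    let funding_txo = monitor.get_funding_txo().0;
    let key = format!("{}_{}", funding_txo.txid, funding_txo.index);
    kv_store
        .write(
            CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
            CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
            &key,
            &monitor.encode(),
        )
        .expect("failed to write channel monitor");
}
```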