Merge pull request #2176 from TheBlueMatt/2023-04-expose-success-prob
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 15 Sep 2023 22:38:57 +0000 (22:38 +0000)
committer GitHub <noreply@github.com>
Fri, 15 Sep 2023 22:38:57 +0000 (22:38 +0000)
Move the historical bucket tracker to 32 unequal sized buckets

85 files changed:
bench/benches/bench.rs
ci/ci-tests.sh
fuzz/src/base32.rs [new file with mode: 0644]
fuzz/src/bin/base32_target.rs [new file with mode: 0644]
fuzz/src/bin/fromstr_to_netaddress_target.rs [new file with mode: 0644]
fuzz/src/bin/gen_target.sh
fuzz/src/chanmon_consistency.rs
fuzz/src/chanmon_deser.rs
fuzz/src/fromstr_to_netaddress.rs [new file with mode: 0644]
fuzz/src/full_stack.rs
fuzz/src/lib.rs
fuzz/src/onion_hop_data.rs
fuzz/src/onion_message.rs
fuzz/src/router.rs
fuzz/src/utils/test_persister.rs
fuzz/src/zbase32.rs
fuzz/targets.h
lightning-background-processor/src/lib.rs
lightning-invoice/src/lib.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/fs_store.rs [new file with mode: 0644]
lightning-persister/src/lib.rs
lightning-persister/src/test_utils.rs [new file with mode: 0644]
lightning-persister/src/util.rs [deleted file]
lightning-persister/src/utils.rs [new file with mode: 0644]
lightning-transaction-sync/tests/integration_tests.rs
lightning/src/blinded_path/mod.rs
lightning/src/blinded_path/payment.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/transaction.rs
lightning/src/events/bump_transaction.rs
lightning/src/events/mod.rs
lightning/src/ln/blinded_payment_tests.rs [new file with mode: 0644]
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channel_id.rs [new file with mode: 0644]
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/inbound_payment.rs
lightning/src/ln/mod.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/offers/invoice.rs
lightning/src/offers/invoice_request.rs
lightning/src/offers/mod.rs
lightning/src/offers/offer.rs
lightning/src/offers/refund.rs
lightning/src/offers/signer.rs
lightning/src/offers/test_utils.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/base32.rs [new file with mode: 0644]
lightning/src/util/chacha20.rs
lightning/src/util/crypto.rs
lightning/src/util/enforcing_trait_impls.rs [deleted file]
lightning/src/util/macro_logger.rs
lightning/src/util/message_signing.rs
lightning/src/util/mod.rs
lightning/src/util/persist.rs
lightning/src/util/test_channel_signer.rs [new file with mode: 0644]
lightning/src/util/test_utils.rs
lightning/src/util/zbase32.rs [deleted file]
pending_changelog/1-hop-bps.txt [new file with mode: 0644]
pending_changelog/invoice_request_failed_downgrade.txt [new file with mode: 0644]
pending_changelog/kvstore.txt [new file with mode: 0644]
pending_changelog/move_netaddress_to_socketaddress.txt [new file with mode: 0644]
pending_changelog/new_channel_id_type_pr_2485.txt [new file with mode: 0644]
pending_changelog/routes_route_params.txt [new file with mode: 0644]

diff --git a/bench/benches/bench.rs b/bench/benches/bench.rs
index 54799f44c951422bf8f81bafd88e62dee1e4355c..bc4bd010822ddc24d84c3d761f7da63d8e7fad99 100644 (file)
@@ -15,7 +15,7 @@ criterion_group!(benches,
        lightning::routing::router::benches::generate_large_mpp_routes_with_probabilistic_scorer,
        lightning::sign::benches::bench_get_secure_random_bytes,
        lightning::ln::channelmanager::bench::bench_sends,
-       lightning_persister::bench::bench_sends,
+       lightning_persister::fs_store::bench::bench_sends,
        lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
        lightning::routing::gossip::benches::read_network_graph,
        lightning::routing::gossip::benches::write_network_graph);
diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
index ef9ecf7b86d8653d5bdf1b7ab758032425bd33d6..7b925cc8544eca2a077ab7a5b3aa9f2af27c3398 100755 (executable)
@@ -29,9 +29,15 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 # The quote crate switched to Rust edition 2021 starting with v1.0.31, i.e., has MSRV of 1.56
 [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p quote --precise "1.0.30" --verbose
 
+# The syn crate depends on too-new proc-macro2 starting with v2.0.33, i.e., has MSRV of 1.56
+[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p syn:2.0.33 --precise "2.0.32" --verbose
+
 # The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
 [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
 
+# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
+[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
+
 [ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
 
 export RUST_BACKTRACE=1
@@ -117,6 +123,10 @@ if [[ $RUSTC_MINOR_VERSION -gt 67 ]]; then
        # lightning-transaction-sync's MSRV is 1.67
        cargo check --verbose --color always --features lightning-transaction-sync
 else
+       # The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
+       # This is currently only a release dependency via core2, which we intend to work with
+       # rust-bitcoin to remove soon.
+       [ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
        cargo check --verbose --color always
 fi
 popd
diff --git a/fuzz/src/base32.rs b/fuzz/src/base32.rs
new file mode 100644 (file)
index 0000000..8171f19
--- /dev/null
@@ -0,0 +1,52 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use lightning::util::base32;
+
+use crate::utils::test_logger;
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+       if let Ok(s) = std::str::from_utf8(data) {
+               let first_decoding = base32::Alphabet::RFC4648 { padding: true }.decode(s);
+               if let Ok(first_decoding) = first_decoding {
+                       let encoding_response = base32::Alphabet::RFC4648 { padding: true }.encode(&first_decoding);
+                       assert_eq!(encoding_response, s.to_ascii_uppercase());
+                       let second_decoding = base32::Alphabet::RFC4648 { padding: true }.decode(&encoding_response).unwrap();
+                       assert_eq!(first_decoding, second_decoding);
+               }
+       }
+
+       if let Ok(s) = std::str::from_utf8(data) {
+               let first_decoding = base32::Alphabet::RFC4648 { padding: false }.decode(s);
+               if let Ok(first_decoding) = first_decoding {
+                       let encoding_response = base32::Alphabet::RFC4648 { padding: false }.encode(&first_decoding);
+                       assert_eq!(encoding_response, s.to_ascii_uppercase());
+                       let second_decoding = base32::Alphabet::RFC4648 { padding: false }.decode(&encoding_response).unwrap();
+                       assert_eq!(first_decoding, second_decoding);
+               }
+       }
+       
+       let encode_response = base32::Alphabet::RFC4648 { padding: false }.encode(&data);
+       let decode_response = base32::Alphabet::RFC4648 { padding: false }.decode(&encode_response).unwrap();
+       assert_eq!(data, decode_response);
+
+       let encode_response = base32::Alphabet::RFC4648 { padding: true }.encode(&data);
+       let decode_response = base32::Alphabet::RFC4648 { padding: true }.decode(&encode_response).unwrap();
+       assert_eq!(data, decode_response);
+}
+
+pub fn base32_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       do_test(data);
+}
+
+#[no_mangle]
+pub extern "C" fn base32_run(data: *const u8, datalen: usize) {
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
+}
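
The target asserts an encode/decode round-trip for both the padded and unpadded RFC 4648 alphabets. A minimal standalone sketch of the same property, using only the lightning::util::base32::Alphabet API called above (the helper name is illustrative):

use lightning::util::base32;

// Returns true iff encoding then decoding reproduces the input bytes for both
// RFC 4648 alphabets -- the invariant the fuzz target above asserts.
fn rfc4648_roundtrip_holds(data: &[u8]) -> bool {
        [true, false].iter().all(|&padding| {
                let encoded = base32::Alphabet::RFC4648 { padding }.encode(data);
                base32::Alphabet::RFC4648 { padding }.decode(&encoded)
                        .map(|decoded| decoded == data)
                        .unwrap_or(false)
        })
}
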
diff --git a/fuzz/src/bin/base32_target.rs b/fuzz/src/bin/base32_target.rs
new file mode 100644 (file)
index 0000000..a7951c7
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::base32::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               base32_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       base32_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       base32_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       base32_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               base32_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/base32") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               base32_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
diff --git a/fuzz/src/bin/fromstr_to_netaddress_target.rs b/fuzz/src/bin/fromstr_to_netaddress_target.rs
new file mode 100644 (file)
index 0000000..29c984e
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::fromstr_to_netaddress::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               fromstr_to_netaddress_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       fromstr_to_netaddress_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       fromstr_to_netaddress_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       fromstr_to_netaddress_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               fromstr_to_netaddress_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/fromstr_to_netaddress") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               fromstr_to_netaddress_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
diff --git a/fuzz/src/bin/gen_target.sh b/fuzz/src/bin/gen_target.sh
index fe17e4bab8ff356c599587446a65ec6c1033528a..2fa7debdf468670d5cf1da0e7ba9fa215df94c31 100755 (executable)
@@ -21,6 +21,8 @@ GEN_TEST router
 GEN_TEST zbase32
 GEN_TEST indexedmap
 GEN_TEST onion_hop_data
+GEN_TEST base32
+GEN_TEST fromstr_to_netaddress
 
 GEN_TEST msg_accept_channel msg_targets::
 GEN_TEST msg_announcement_signatures msg_targets::
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 296b3a03e9c1b85b8df7567efb93a6e3b3125937..8afc2e15187edb5397474a64b2cf33e34359a667 100644 (file)
@@ -46,7 +46,7 @@ use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
@@ -118,14 +118,13 @@ struct TestChainMonitor {
        pub logger: Arc<dyn Logger>,
        pub keys: Arc<KeyProvider>,
        pub persister: Arc<TestPersister>,
-       pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       pub chain_monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
        // logic will automatically force-close our channels for us (as we don't have an up-to-date
        // monitor implying we are not able to punish misbehaving counterparties). Because this test
        // "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
        // fully-serialized monitor state here, as well as the corresponding update_id.
        pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
-       pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChainMonitor {
        pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,18 +134,16 @@ impl TestChainMonitor {
                        keys,
                        persister,
                        latest_monitors: Mutex::new(HashMap::new()),
-                       should_update_manager: atomic::AtomicBool::new(false),
                }
        }
 }
-impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
+impl chain::Watch<TestChannelSigner> for TestChainMonitor {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                let mut ser = VecWriter(Vec::new());
                monitor.write(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
                        panic!("Already had monitor pre-watch_channel");
                }
-               self.should_update_manager.store(true, atomic::Ordering::Relaxed);
                self.chain_monitor.watch_channel(funding_txo, monitor)
        }
 
@@ -156,13 +153,12 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                        hash_map::Entry::Occupied(entry) => entry,
                        hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
                };
-               let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
+               let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
                        read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
                deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
                let mut ser = VecWriter(Vec::new());
                deserialized_monitor.write(&mut ser).unwrap();
                map_entry.insert((update.update_id, ser.0));
-               self.should_update_manager.store(true, atomic::Ordering::Relaxed);
                self.chain_monitor.update_channel(funding_txo, update)
        }
 
@@ -234,7 +230,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
@@ -257,7 +253,7 @@ impl SignerProvider for KeyProvider {
                        channel_keys_id,
                );
                let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
+               TestChannelSigner::new_with_revoked(keys, revoked_commitment, false)
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
@@ -266,7 +262,7 @@ impl SignerProvider for KeyProvider {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner {
+               Ok(TestChannelSigner {
                        inner,
                        state,
                        disable_revocation_policy_check: false,
@@ -376,7 +372,7 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p
                        fee_msat: amt,
                        cltv_expiry_delta: 200,
                }], blinded_tail: None }],
-               payment_params: None,
+               route_params: None,
        }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
                check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
                false
@@ -409,7 +405,7 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
                        channel_features: middle.channel_features(),
                        fee_msat: first_hop_fee,
                        cltv_expiry_delta: 100,
-               },RouteHop {
+               }, RouteHop {
                        pubkey: dest.get_our_node_id(),
                        node_features: dest.node_features(),
                        short_channel_id: dest_chan_id,
@@ -417,7 +413,7 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
                        fee_msat: amt,
                        cltv_expiry_delta: 200,
                }], blinded_tail: None }],
-               payment_params: None,
+               route_params: None,
        }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
                let sent_amt = amt + first_hop_fee;
                check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
@@ -477,7 +473,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        let mut monitors = HashMap::new();
                        let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
                        for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
-                               monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
+                               monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
                                chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
                        }
                        let mut monitor_refs = HashMap::new();
@@ -1101,11 +1097,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                if !chan_a_disconnected {
                                        nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
                                        chan_a_disconnected = true;
-                                       drain_msg_events_on_disconnect!(0);
-                               }
-                               if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
-                                       node_a_ser.0.clear();
-                                       nodes[0].write(&mut node_a_ser).unwrap();
+                                       push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+                                       ab_events.clear();
+                                       ba_events.clear();
                                }
                                let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
                                nodes[0] = new_node_a;
@@ -1134,11 +1128,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                if !chan_b_disconnected {
                                        nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
                                        chan_b_disconnected = true;
-                                       drain_msg_events_on_disconnect!(2);
-                               }
-                               if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
-                                       node_c_ser.0.clear();
-                                       nodes[2].write(&mut node_c_ser).unwrap();
+                                       push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+                                       bc_events.clear();
+                                       cb_events.clear();
                                }
                                let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
                                nodes[2] = new_node_c;
@@ -1304,15 +1296,18 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        _ => test_return!(),
                }
 
-               node_a_ser.0.clear();
-               nodes[0].write(&mut node_a_ser).unwrap();
-               monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
-               node_b_ser.0.clear();
-               nodes[1].write(&mut node_b_ser).unwrap();
-               monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
-               node_c_ser.0.clear();
-               nodes[2].write(&mut node_c_ser).unwrap();
-               monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
+               if nodes[0].get_and_clear_needs_persistence() == true {
+                       node_a_ser.0.clear();
+                       nodes[0].write(&mut node_a_ser).unwrap();
+               }
+               if nodes[1].get_and_clear_needs_persistence() == true {
+                       node_b_ser.0.clear();
+                       nodes[1].write(&mut node_b_ser).unwrap();
+               }
+               if nodes[2].get_and_clear_needs_persistence() == true {
+                       node_c_ser.0.clear();
+                       nodes[2].write(&mut node_c_ser).unwrap();
+               }
        }
 }
 
diff --git a/fuzz/src/chanmon_deser.rs b/fuzz/src/chanmon_deser.rs
index 61744ace7c733419149455dc21ab70fe7e496b60..8d425357c96844b2b9766de612879750f4dceddc 100644 (file)
@@ -4,7 +4,7 @@
 use bitcoin::hash_types::BlockHash;
 
 use lightning::chain::channelmonitor;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::ser::{ReadableArgs, Writer, Writeable};
 use lightning::util::test_utils::OnlyReadsKeysInterface;
 
@@ -22,10 +22,10 @@ impl Writer for VecWriter {
 
 #[inline]
 pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
-       if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
+       if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
                let mut w = VecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
-               let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
+               let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
                assert!(latest_block_hash == deserialized_copy.0);
                assert!(monitor == deserialized_copy.1);
        }
diff --git a/fuzz/src/fromstr_to_netaddress.rs b/fuzz/src/fromstr_to_netaddress.rs
new file mode 100644 (file)
index 0000000..dba2d44
--- /dev/null
@@ -0,0 +1,31 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use lightning::ln::msgs::SocketAddress;
+use core::str::FromStr;
+
+use crate::utils::test_logger;
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+       if let Ok(s) = std::str::from_utf8(data) {
+               let _ = SocketAddress::from_str(s);
+       }
+
+}
+
+pub fn fromstr_to_netaddress_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       do_test(data);
+}
+
+#[no_mangle]
+pub extern "C" fn fromstr_to_netaddress_run(data: *const u8, datalen: usize) {
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
+}
+
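
This target simply feeds arbitrary UTF-8 to the SocketAddress parser and relies on it returning an error, rather than panicking, for malformed input. A small usage sketch of the same FromStr implementation (the address strings are illustrative):

use lightning::ln::msgs::SocketAddress;
use core::str::FromStr;

// Well-formed host:port strings parse into a SocketAddress variant; anything
// else is rejected with an Err rather than a panic.
fn parse_examples() {
        assert!(SocketAddress::from_str("198.51.100.1:9735").is_ok());
        assert!(SocketAddress::from_str("not an address").is_err());
}
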
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index cf8060ab6b647f73436c7dae1c7e216c3a400aad..d7167146cac18d90c58d31f617f5af677875bbbf 100644 (file)
@@ -34,7 +34,7 @@ use lightning::chain::chainmonitor;
 use lightning::chain::transaction::OutPoint;
 use lightning::sign::{InMemorySigner, Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
 use lightning::events::Event;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentId, RecipientOnionFields, Retry};
 use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler};
 use lightning::ln::msgs::{self, DecodeError};
@@ -47,7 +47,7 @@ use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
 use lightning::util::config::{UserConfig, MaxDustHTLCExposure};
 use lightning::util::errors::APIError;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::logger::Logger;
 use lightning::util::ser::{ReadableArgs, Writeable};
 
@@ -180,13 +180,13 @@ impl<'a> std::hash::Hash for Peer<'a> {
 }
 
 type ChannelMan<'a> = ChannelManager<
-       Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<FuzzEstimator>, &'a FuzzRouter, Arc<dyn Logger>>;
 type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan<'a>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn UtxoLookup>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler, Arc<KeyProvider>>;
 
 struct MoneyLossDetector<'a> {
        manager: Arc<ChannelMan<'a>>,
-       monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        handler: PeerMan<'a>,
 
        peers: &'a RefCell<[bool; 256]>,
@@ -200,7 +200,7 @@ struct MoneyLossDetector<'a> {
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
                   manager: Arc<ChannelMan<'a>>,
-                  monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+                  monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
                   handler: PeerMan<'a>) -> Self {
                MoneyLossDetector {
                        manager,
@@ -339,7 +339,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
@@ -351,7 +351,7 @@ impl SignerProvider for KeyProvider {
                let secp_ctx = Secp256k1::signing_only();
                let ctr = channel_keys_id[0];
                let (inbound, state) = self.signer_state.borrow().get(&ctr).unwrap().clone();
-               EnforcingSigner::new_with_revoked(if inbound {
+               TestChannelSigner::new_with_revoked(if inbound {
                        InMemorySigner::new(
                                &secp_ctx,
                                SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ctr]).unwrap(),
@@ -380,11 +380,11 @@ impl SignerProvider for KeyProvider {
                }, state, false)
        }
 
-       fn read_chan_signer(&self, mut data: &[u8]) -> Result<EnforcingSigner, DecodeError> {
+       fn read_chan_signer(&self, mut data: &[u8]) -> Result<TestChannelSigner, DecodeError> {
                let inner: InMemorySigner = ReadableArgs::read(&mut data, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        false
@@ -481,7 +481,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        let mut should_forward = false;
        let mut payments_received: Vec<PaymentHash> = Vec::new();
        let mut payments_sent = 0;
-       let mut pending_funding_generation: Vec<([u8; 32], PublicKey, u64, Script)> = Vec::new();
+       let mut pending_funding_generation: Vec<(ChannelId, PublicKey, u64, Script)> = Vec::new();
        let mut pending_funding_signatures = HashMap::new();
 
        loop {
@@ -527,10 +527,8 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                        4 => {
                                let final_value_msat = slice_to_be24(get_slice!(3)) as u64;
                                let payment_params = PaymentParameters::from_node_id(get_pubkey!(), 42);
-                               let params = RouteParameters {
-                                       payment_params,
-                                       final_value_msat,
-                               };
+                               let params = RouteParameters::from_payment_params_and_value(
+                                       payment_params, final_value_msat);
                                let mut payment_hash = PaymentHash([0; 32]);
                                payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
                                let mut sha = Sha256::engine();
@@ -548,10 +546,8 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                        15 => {
                                let final_value_msat = slice_to_be24(get_slice!(3)) as u64;
                                let payment_params = PaymentParameters::from_node_id(get_pubkey!(), 42);
-                               let params = RouteParameters {
-                                       payment_params,
-                                       final_value_msat,
-                               };
+                               let params = RouteParameters::from_payment_params_and_value(
+                                       payment_params, final_value_msat);
                                let mut payment_hash = PaymentHash([0; 32]);
                                payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
                                let mut sha = Sha256::engine();
diff --git a/fuzz/src/lib.rs b/fuzz/src/lib.rs
index 6cdeb8ab5d205f66be9083f035ea767d878cfad8..5b5cd69cf9681ac2d1fa5c8b75a52eebbd0447a5 100644 (file)
@@ -29,5 +29,7 @@ pub mod refund_deser;
 pub mod router;
 pub mod zbase32;
 pub mod onion_hop_data;
+pub mod base32;
+pub mod fromstr_to_netaddress;
 
 pub mod msg_targets;
diff --git a/fuzz/src/onion_hop_data.rs b/fuzz/src/onion_hop_data.rs
index 54b283ab0f1628974fa3545978699453f65330bb..cc80ccf932088423aa37e1a637c0b85e22322602 100644 (file)
 // To modify it, modify msg_target_template.txt and run gen_target.sh instead.
 
 use crate::utils::test_logger;
+use lightning::util::test_utils;
 
 #[inline]
 pub fn onion_hop_data_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
-       use lightning::util::ser::Readable;
+       use lightning::util::ser::ReadableArgs;
        let mut r = ::std::io::Cursor::new(data);
-       let _ =  <lightning::ln::msgs::InboundOnionPayload as Readable>::read(&mut r);
+       let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
+       let _ =  <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestNodeSigner>>::read(&mut r, &&node_signer);
 }
 
 #[no_mangle]
 pub extern "C" fn onion_hop_data_run(data: *const u8, datalen: usize) {
-       use lightning::util::ser::Readable;
+       use lightning::util::ser::ReadableArgs;
        let data = unsafe { std::slice::from_raw_parts(data, datalen) };
        let mut r = ::std::io::Cursor::new(data);
-       let _ =  <lightning::ln::msgs::InboundOnionPayload as Readable>::read(&mut r);
+       let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
+       let _ =  <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestNodeSigner>>::read(&mut r, &&node_signer);
 }
diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
index 0ffc090ea197514c8ab5eacf3e2a13030b11f759..d2e35cd45cdbabc7745abd4f49c8cdf94e793362 100644 (file)
@@ -11,7 +11,7 @@ use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
 use lightning::ln::script::ShutdownScript;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::logger::Logger;
 use lightning::util::ser::{Readable, Writeable, Writer};
 use lightning::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessagePath, OnionMessenger};
@@ -174,7 +174,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!() }
 
@@ -182,7 +182,7 @@ impl SignerProvider for KeyProvider {
                unreachable!()
        }
 
-       fn read_chan_signer(&self, _data: &[u8]) -> Result<EnforcingSigner, DecodeError> { unreachable!() }
+       fn read_chan_signer(&self, _data: &[u8]) -> Result<TestChannelSigner, DecodeError> { unreachable!() }
 
        fn get_destination_script(&self) -> Result<Script, ()> { unreachable!() }
 
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index 6195e736618520bfaf9e4e87dca9ba2139988d8f..b7d45bf729f4d6c488c5916de536a5cf6f02f6dc 100644 (file)
@@ -13,6 +13,7 @@ use bitcoin::hash_types::BlockHash;
 
 use lightning::blinded_path::{BlindedHop, BlindedPath};
 use lightning::chain::transaction::OutPoint;
+use lightning::ln::ChannelId;
 use lightning::ln::channelmanager::{self, ChannelDetails, ChannelCounterparty};
 use lightning::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures};
 use lightning::ln::msgs;
@@ -210,7 +211,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
                                                let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap());
                                                $first_hops_vec.push(ChannelDetails {
-                                                       channel_id: [0; 32],
+                                                       channel_id: ChannelId::new_zero(),
                                                        counterparty: ChannelCounterparty {
                                                                node_id: *rnid,
                                                                features: channelmanager::provided_init_features(&UserConfig::default()),
@@ -325,11 +326,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                let mut last_hops = Vec::new();
                                last_hops!(last_hops);
                                find_routes!(first_hops, node_pks.iter(), |final_amt, final_delta, target: &PublicKey| {
-                                       RouteParameters {
-                                               payment_params: PaymentParameters::from_node_id(*target, final_delta)
+                                       RouteParameters::from_payment_params_and_value(
+                                               PaymentParameters::from_node_id(*target, final_delta)
                                                        .with_route_hints(last_hops.clone()).unwrap(),
-                                               final_value_msat: final_amt,
-                                       }
+                                               final_amt)
                                });
                        },
                        x => {
@@ -365,11 +365,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                let mut features = Bolt12InvoiceFeatures::empty();
                                features.set_basic_mpp_optional();
                                find_routes!(first_hops, vec![dummy_pk].iter(), |final_amt, _, _| {
-                                       RouteParameters {
-                                               payment_params: PaymentParameters::blinded(last_hops.clone())
-                                                       .with_bolt12_features(features.clone()).unwrap(),
-                                               final_value_msat: final_amt,
-                                       }
+                                       RouteParameters::from_payment_params_and_value(PaymentParameters::blinded(last_hops.clone())
+                                               .with_bolt12_features(features.clone()).unwrap(),
+                                       final_amt)
                                });
                        }
                }
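
Both router hunks above swap the removed RouteParameters struct literal for the new constructor. A sketch of the resulting call shape (the target key and amount are caller-supplied placeholders):

use bitcoin::secp256k1::PublicKey;
use lightning::routing::router::{PaymentParameters, RouteParameters};

// Builds RouteParameters the way the updated fuzz code does: payment parameters
// first, then the constructor pairing them with the amount to send.
fn route_params_for(target: PublicKey, amount_msat: u64) -> RouteParameters {
        let payment_params = PaymentParameters::from_node_id(target, 42);
        RouteParameters::from_payment_params_and_value(payment_params, amount_msat)
}
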
diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs
index e3635297adb037500adafebd98708025c99fa762..89de25aa5e6a192786a36b8d97605dbabf86b25d 100644 (file)
@@ -2,19 +2,19 @@ use lightning::chain;
 use lightning::chain::{chainmonitor, channelmonitor};
 use lightning::chain::chainmonitor::MonitorUpdateId;
 use lightning::chain::transaction::OutPoint;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 
 use std::sync::Mutex;
 
 pub struct TestPersister {
        pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
-impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+impl chainmonitor::Persist<TestChannelSigner> for TestPersister {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 }
diff --git a/fuzz/src/zbase32.rs b/fuzz/src/zbase32.rs
index 5ea453bfb86934f24a93e7e2580b592f4460c61f..04979204e9898ddab007b59d3efceefa3d5086be 100644 (file)
@@ -7,18 +7,19 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use lightning::util::zbase32;
+use lightning::util::base32;
 
 use crate::utils::test_logger;
 
 #[inline]
 pub fn do_test(data: &[u8]) {
-       let res = zbase32::encode(data);
-       assert_eq!(&zbase32::decode(&res).unwrap()[..], data);
+       let res = base32::Alphabet::ZBase32.encode(data);
+       assert_eq!(&base32::Alphabet::ZBase32.decode(&res).unwrap()[..], data);
 
        if let Ok(s) = std::str::from_utf8(data) {
-               if let Ok(decoded) = zbase32::decode(s) {
-                       assert_eq!(&zbase32::encode(&decoded), &s.to_ascii_lowercase());
+               let res = base32::Alphabet::ZBase32.decode(s);
+               if let Ok(decoded) = res {
+                       assert_eq!(&base32::Alphabet::ZBase32.encode(&decoded), &s.to_ascii_lowercase());
                }
        }
 }
diff --git a/fuzz/targets.h b/fuzz/targets.h
index 9b5a6d4553645215a32f6679c90d1b482d3d5c16..cad0ac4d822d7cd9e46e3acd39977e58e5e3ffda 100644 (file)
@@ -14,6 +14,8 @@ void router_run(const unsigned char* data, size_t data_len);
 void zbase32_run(const unsigned char* data, size_t data_len);
 void indexedmap_run(const unsigned char* data, size_t data_len);
 void onion_hop_data_run(const unsigned char* data, size_t data_len);
+void base32_run(const unsigned char* data, size_t data_len);
+void fromstr_to_netaddress_run(const unsigned char* data, size_t data_len);
 void msg_accept_channel_run(const unsigned char* data, size_t data_len);
 void msg_announcement_signatures_run(const unsigned char* data, size_t data_len);
 void msg_channel_reestablish_run(const unsigned char* data, size_t data_len);
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 1ed6a2a8345bf8751ad6389e8c306804bedf1e91..6a36874a384bf9ca23a91d2cef6557853943aa2f 100644 (file)
@@ -34,7 +34,7 @@ use lightning::ln::peer_handler::APeerManager;
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::Router;
-use lightning::routing::scoring::{Score, WriteableScore};
+use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
 use lightning::util::logger::Logger;
 use lightning::util::persist::Persister;
 #[cfg(feature = "std")]
@@ -241,23 +241,27 @@ fn handle_network_graph_update<L: Deref>(
 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
        scorer: &'a S, event: &Event
 ) -> bool {
-       let mut score = scorer.lock();
        match event {
                Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
+                       let mut score = scorer.write_lock();
                        score.payment_path_failed(path, *scid);
                },
                Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
                        // Reached if the destination explicitly failed it back. We treat this as a successful probe
                        // because the payment made it all the way to the destination with sufficient liquidity.
+                       let mut score = scorer.write_lock();
                        score.probe_successful(path);
                },
                Event::PaymentPathSuccessful { path, .. } => {
+                       let mut score = scorer.write_lock();
                        score.payment_path_successful(path);
                },
                Event::ProbeSuccessful { path, .. } => {
+                       let mut score = scorer.write_lock();
                        score.probe_successful(path);
                },
                Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
+                       let mut score = scorer.write_lock();
                        score.probe_failed(path, *scid);
                },
                _ => return false,
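
With Score split into ScoreLookUp and ScoreUpdate, mutation now goes through WriteableScore's write_lock(), taken only in the arms that actually record a result. A reduced sketch of that locking pattern, with trait bounds simplified relative to update_scorer above:

use lightning::routing::router::Path;
use lightning::routing::scoring::{LockableScore, ScoreUpdate, WriteableScore};

// Holds the write lock only for the duration of a single scorer update,
// mirroring the per-arm locking introduced in the hunk above.
fn note_path_failure<'a, S: WriteableScore<'a>>(scorer: &'a S, path: &Path, scid: u64) {
        let mut score = scorer.write_lock();
        score.payment_path_failed(path, scid);
}
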
@@ -311,7 +315,7 @@ macro_rules! define_run_body {
                        // see `await_start`'s use below.
                        let mut await_start = None;
                        if $check_slow_await { await_start = Some($get_timer(1)); }
-                       let updates_available = $await;
+                       $await;
                        let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
 
                        // Exit the loop if the background processor was requested to stop.
@@ -320,7 +324,7 @@ macro_rules! define_run_body {
                                break;
                        }
 
-                       if updates_available {
+                       if $channel_manager.get_and_clear_needs_persistence() {
                                log_trace!($logger, "Persisting ChannelManager...");
                                $persister.persist_manager(&*$channel_manager)?;
                                log_trace!($logger, "Done persisting ChannelManager.");
@@ -496,9 +500,16 @@ use core::task;
 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
 /// could setup `process_events_async` like this:
 /// ```
-/// # struct MyPersister {}
-/// # impl lightning::util::persist::KVStorePersister for MyPersister {
-/// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
+/// # use lightning::io;
+/// # use std::sync::{Arc, Mutex};
+/// # use std::sync::atomic::{AtomicBool, Ordering};
+/// # use lightning_background_processor::{process_events_async, GossipSync};
+/// # struct MyStore {}
+/// # impl lightning::util::persist::KVStore for MyStore {
+/// #     fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
+/// #     fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
+/// #     fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
+/// #     fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
 /// # }
 /// # struct MyEventHandler {}
 /// # impl MyEventHandler {
@@ -510,23 +521,20 @@ use core::task;
 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
 /// #     fn disconnect_socket(&mut self) {}
 /// # }
-/// # use std::sync::{Arc, Mutex};
-/// # use std::sync::atomic::{AtomicBool, Ordering};
-/// # use lightning_background_processor::{process_events_async, GossipSync};
 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
-/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
+/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
 ///
-/// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
+/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
 ///    let background_persister = Arc::clone(&my_persister);
 ///    let background_event_handler = Arc::clone(&my_event_handler);
 ///    let background_chain_mon = Arc::clone(&my_chain_monitor);
@@ -647,16 +655,14 @@ where
                channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
                gossip_sync, peer_manager, logger, scorer, should_break, {
                        let fut = Selector {
-                               a: channel_manager.get_persistable_update_future(),
+                               a: channel_manager.get_event_or_persistence_needed_future(),
                                b: chain_monitor.get_update_future(),
                                c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
                        };
                        match fut.await {
-                               SelectorOutput::A => true,
-                               SelectorOutput::B => false,
+                               SelectorOutput::A|SelectorOutput::B => {},
                                SelectorOutput::C(exit) => {
                                        should_break = exit;
-                                       false
                                }
                        }
                }, |t| sleeper(Duration::from_secs(t)),
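For readers wiring this loop up themselves: the `sleeper` closure referenced above is expected to resolve to a bool that the `SelectorOutput::C(exit)` arm turns into `should_break`. A minimal sketch of such a closure, assuming a tokio runtime and an application-managed shutdown flag (both are assumptions for illustration, not part of this change):

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

fn main() {
    // Application-managed shutdown flag (assumed for this sketch).
    let stop = Arc::new(AtomicBool::new(false));
    let stop_for_sleeper = Arc::clone(&stop);

    // Sleep for the requested duration, then report whether the loop should exit;
    // above, `SelectorOutput::C(exit)` turns that bool into `should_break = exit`.
    let sleeper = move |d: Duration| -> Pin<Box<dyn Future<Output = bool> + Send>> {
        let stop = Arc::clone(&stop_for_sleeper);
        Box::pin(async move {
            tokio::time::sleep(d).await;
            stop.load(Ordering::Acquire)
        })
    };

    // `sleeper` would then be handed to `process_events_async` as its timer argument.
    let _ = sleeper;
}
```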
@@ -779,10 +785,10 @@ impl BackgroundProcessor {
                        define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
                                channel_manager, channel_manager.process_pending_events(&event_handler),
                                gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
-                               Sleeper::from_two_futures(
-                                       channel_manager.get_persistable_update_future(),
+                               { Sleeper::from_two_futures(
+                                       channel_manager.get_event_or_persistence_needed_future(),
                                        chain_monitor.get_update_future()
-                               ).wait_timeout(Duration::from_millis(100)),
+                               ).wait_timeout(Duration::from_millis(100)); },
                                |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
                });
                Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
@@ -858,12 +864,12 @@ mod tests {
        use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
        use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
        use lightning::routing::router::{DefaultRouter, Path, RouteHop};
-       use lightning::routing::scoring::{ChannelUsage, Score};
+       use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
        use lightning::util::config::UserConfig;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
-       use lightning::util::persist::KVStorePersister;
-       use lightning_persister::FilesystemPersister;
+       use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_SUB_NAMESPACE, SCORER_PERSISTENCE_KEY};
+       use lightning_persister::fs_store::FilesystemStore;
        use std::collections::VecDeque;
        use std::{fs, env};
        use std::path::PathBuf;
@@ -902,7 +908,7 @@ mod tests {
                        >,
                        Arc<test_utils::TestLogger>>;
 
-       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
+       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
 
        type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
        type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
@@ -913,7 +919,7 @@ mod tests {
                rapid_gossip_sync: RGS,
                peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
                chain_monitor: Arc<ChainMonitor>,
-               persister: Arc<FilesystemPersister>,
+               kv_store: Arc<FilesystemStore>,
                tx_broadcaster: Arc<test_utils::TestBroadcaster>,
                network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
                logger: Arc<test_utils::TestLogger>,
@@ -937,9 +943,9 @@ mod tests {
 
        impl Drop for Node {
                fn drop(&mut self) {
-                       let data_dir = self.persister.get_data_dir();
+                       let data_dir = self.kv_store.get_data_dir();
                        match fs::remove_dir_all(data_dir.clone()) {
-                               Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
+                               Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
                                _ => {}
                        }
                }
@@ -950,13 +956,13 @@ mod tests {
                graph_persistence_notifier: Option<SyncSender<()>>,
                manager_error: Option<(std::io::ErrorKind, &'static str)>,
                scorer_error: Option<(std::io::ErrorKind, &'static str)>,
-               filesystem_persister: FilesystemPersister,
+               kv_store: FilesystemStore,
        }
 
        impl Persister {
-               fn new(data_dir: String) -> Self {
-                       let filesystem_persister = FilesystemPersister::new(data_dir);
-                       Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
+               fn new(data_dir: PathBuf) -> Self {
+                       let kv_store = FilesystemStore::new(data_dir);
+                       Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
                }
 
                fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
@@ -976,15 +982,25 @@ mod tests {
                }
        }
 
-       impl KVStorePersister for Persister {
-               fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
-                       if key == "manager" {
+       impl KVStore for Persister {
+               fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
+                       self.kv_store.read(namespace, sub_namespace, key)
+               }
+
+               fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
+                       if namespace == CHANNEL_MANAGER_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE &&
+                               key == CHANNEL_MANAGER_PERSISTENCE_KEY
+                       {
                                if let Some((error, message)) = self.manager_error {
                                        return Err(std::io::Error::new(error, message))
                                }
                        }
 
-                       if key == "network_graph" {
+                       if namespace == NETWORK_GRAPH_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE &&
+                               key == NETWORK_GRAPH_PERSISTENCE_KEY
+                       {
                                if let Some(sender) = &self.graph_persistence_notifier {
                                        match sender.send(()) {
                                                Ok(()) => {},
@@ -997,13 +1013,24 @@ mod tests {
                                }
                        }
 
-                       if key == "scorer" {
+                       if namespace == SCORER_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == SCORER_PERSISTENCE_SUB_NAMESPACE &&
+                               key == SCORER_PERSISTENCE_KEY
+                       {
                                if let Some((error, message)) = self.scorer_error {
                                        return Err(std::io::Error::new(error, message))
                                }
                        }
 
-                       self.filesystem_persister.persist(key, object)
+                       self.kv_store.write(namespace, sub_namespace, key, buf)
+               }
+
+               fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
+                       self.kv_store.remove(namespace, sub_namespace, key, lazy)
+               }
+
+               fn list(&self, namespace: &str, sub_namespace: &str) -> lightning::io::Result<Vec<String>> {
+                       self.kv_store.list(namespace, sub_namespace)
                }
        }
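For reference, the `KVStore` surface this test wrapper delegates through is small enough that a toy in-memory implementation fits in a few lines. The following is a hedged sketch; the `MemStore` type is hypothetical and only mirrors the trait signatures exercised above:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use lightning::io;
use lightning::util::persist::KVStore;

/// Hypothetical in-memory store keyed by (namespace, sub_namespace, key).
struct MemStore {
	map: Mutex<HashMap<(String, String, String), Vec<u8>>>,
}

impl KVStore for MemStore {
	fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
		self.map.lock().unwrap()
			.get(&(namespace.to_string(), sub_namespace.to_string(), key.to_string()))
			.cloned()
			.ok_or_else(|| std::io::Error::new(std::io::ErrorKind::NotFound, "key not found"))
	}

	fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
		self.map.lock().unwrap().insert(
			(namespace.to_string(), sub_namespace.to_string(), key.to_string()), buf.to_vec());
		Ok(())
	}

	fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
		self.map.lock().unwrap()
			.remove(&(namespace.to_string(), sub_namespace.to_string(), key.to_string()));
		Ok(())
	}

	fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
		Ok(self.map.lock().unwrap().keys()
			.filter(|(ns, sns, _)| ns == namespace && sns == sub_namespace)
			.map(|(_, _, k)| k.clone())
			.collect())
	}
}
```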
 
@@ -1033,12 +1060,14 @@ mod tests {
                fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
        }
 
-       impl Score for TestScorer {
+       impl ScoreLookUp for TestScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(
                        &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
                ) -> u64 { unimplemented!(); }
+       }
 
+       impl ScoreUpdate for TestScorer {
                fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
                        if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front().unwrap() {
@@ -1151,10 +1180,10 @@ mod tests {
                        let seed = [i as u8; 32];
                        let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
                        let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
-                       let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
+                       let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
                        let now = Duration::from_secs(genesis_block.header.time as u64);
                        let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
-                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
+                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
                        let best_block = BestBlock::from_network(network);
                        let params = ChainParameters { network, best_block };
                        let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
@@ -1166,7 +1195,7 @@ mod tests {
                                onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
                        };
                        let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
-                       let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
+                       let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
                        nodes.push(node);
                }
 
@@ -1261,7 +1290,7 @@ mod tests {
                let tx = open_channel!(nodes[0], nodes[1], 100000);
 
                // Initiate the background processors to watch each node.
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1295,7 +1324,7 @@ mod tests {
                check_persisted_data!(nodes[0].node, filepath.clone());
 
                loop {
-                       if !nodes[0].node.get_persistence_condvar_value() { break }
+                       if !nodes[0].node.get_event_or_persist_condvar_value() { break }
                }
 
                // Force-close the channel.
@@ -1304,7 +1333,7 @@ mod tests {
                // Check that the force-close updates are persisted.
                check_persisted_data!(nodes[0].node, filepath.clone());
                loop {
-                       if !nodes[0].node.get_persistence_condvar_value() { break }
+                       if !nodes[0].node.get_event_or_persist_condvar_value() { break }
                }
 
                // Check network graph is persisted
@@ -1326,7 +1355,7 @@ mod tests {
                // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
                // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
                let (_, nodes) = create_nodes(1, "test_timer_tick_called");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1353,7 +1382,7 @@ mod tests {
                let (_, nodes) = create_nodes(2, "test_persist_error");
                open_channel!(nodes[0], nodes[1], 100000);
 
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1373,7 +1402,7 @@ mod tests {
                let (_, nodes) = create_nodes(2, "test_persist_error_sync");
                open_channel!(nodes[0], nodes[1], 100000);
 
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
 
                let bp_future = super::process_events_async(
@@ -1399,7 +1428,7 @@ mod tests {
        fn test_network_graph_persist_error() {
                // Test that if we encounter an error during network graph persistence, an error gets returned.
                let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1417,7 +1446,7 @@ mod tests {
        fn test_scorer_persist_error() {
                // Test that if we encounter an error during scorer persistence, an error gets returned.
                let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1435,7 +1464,7 @@ mod tests {
        fn test_background_event_handling() {
                let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
                let channel_value = 100000;
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir.clone()));
 
                // Set up a background event handler for FundingGenerationReady events.
@@ -1508,7 +1537,7 @@ mod tests {
        #[test]
        fn test_scorer_persistence() {
                let (_, nodes) = create_nodes(2, "test_scorer_persistence");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1580,7 +1609,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 
                let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
                let event_handler = |_: _| {};
@@ -1599,7 +1628,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 
                let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
                let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
@@ -1739,7 +1768,7 @@ mod tests {
                };
 
                let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
@@ -1772,7 +1801,7 @@ mod tests {
                };
 
                let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
 
                let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
index d1b381130c0714c566d48a38dcf8b843935b6f95..d4e2b736757d35c62f348fccf122f3cd39403470 100644
@@ -50,11 +50,9 @@ use bech32::u5;
 use bitcoin::{Address, Network, PubkeyHash, ScriptHash};
 use bitcoin::util::address::{Payload, WitnessVersion};
 use bitcoin_hashes::{Hash, sha256};
-use lightning::ln::PaymentSecret;
 use lightning::ln::features::Bolt11InvoiceFeatures;
 #[cfg(any(doc, test))]
 use lightning::routing::gossip::RoutingFees;
-use lightning::routing::router::RouteHint;
 use lightning::util::invoice::construct_invoice_preimage;
 
 use secp256k1::PublicKey;
@@ -73,6 +71,11 @@ use core::str;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Deserializer,Serialize, Serializer, de::Error};
 
+#[doc(no_inline)]
+pub use lightning::ln::PaymentSecret;
+#[doc(no_inline)]
+pub use lightning::routing::router::RouteHint;
+
 mod de;
 mod ser;
 mod tb;
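With these re-exports in place, downstream code can import both types directly from `lightning-invoice` rather than reaching into `lightning`. A short, hypothetical usage sketch:

```rust
// Hypothetical downstream helper; only the import paths are the point here.
use lightning_invoice::{PaymentSecret, RouteHint};

fn describe(secret: &PaymentSecret, hints: &[RouteHint]) -> String {
    format!("payment secret of {} bytes, {} route hint(s)", secret.0.len(), hints.len())
}
```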
index cc38e67c24325233919a65de4232ab156029323b..a512b2de05dd74eb7125bf543d44fd0dbe5ec315 100644
@@ -627,7 +627,7 @@ where
        log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
        for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
                if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() {
-                       log_trace!(logger, "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id));
+                       log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id);
                        continue;
                }
 
@@ -641,7 +641,7 @@ where
                                // If any public channel exists, return no hints and let the sender
                                // look at the public channels instead.
                                log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                                return vec![].into_iter().take(MAX_CHANNEL_HINTS).map(route_hint_from_channel);
                        }
                }
@@ -681,18 +681,18 @@ where
                                        log_trace!(logger,
                                                "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
                                                log_pubkey!(channel.counterparty.node_id),
-                                               log_bytes!(channel.channel_id), channel.short_channel_id,
+                                               &channel.channel_id, channel.short_channel_id,
                                                channel.inbound_capacity_msat,
-                                               log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+                                               &entry.get().channel_id, entry.get().short_channel_id,
                                                current_max_capacity);
                                        entry.insert(channel);
                                } else {
                                        log_trace!(logger,
                                                "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
                                                log_pubkey!(channel.counterparty.node_id),
-                                               log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+                                               &entry.get().channel_id, entry.get().short_channel_id,
                                                current_max_capacity,
-                                               log_bytes!(channel.channel_id), channel.short_channel_id,
+                                               &channel.channel_id, channel.short_channel_id,
                                                channel.inbound_capacity_msat);
                                }
                        }
@@ -731,14 +731,14 @@ where
 
                        if include_channel {
                                log_trace!(logger, "Including channel {} in invoice route hints",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                        } else if !has_enough_capacity {
                                log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                        } else {
                                debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public));
                                log_trace!(logger, "Ignoring channel {} with disconnected peer",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                        }
 
                        include_channel
@@ -869,10 +869,8 @@ mod test {
                                invoice.min_final_cltv_expiry_delta() as u32)
                        .with_bolt11_features(invoice.features().unwrap().clone()).unwrap()
                        .with_route_hints(invoice.route_hints()).unwrap();
-               let route_params = RouteParameters {
-                       payment_params,
-                       final_value_msat: invoice.amount_milli_satoshis().unwrap(),
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, invoice.amount_milli_satoshis().unwrap());
                let payment_event = {
                        let mut payment_hash = PaymentHash([0; 32]);
                        payment_hash.0.copy_from_slice(&invoice.payment_hash().as_ref()[0..32]);
@@ -1326,10 +1324,8 @@ mod test {
                                invoice.min_final_cltv_expiry_delta() as u32)
                        .with_bolt11_features(invoice.features().unwrap().clone()).unwrap()
                        .with_route_hints(invoice.route_hints()).unwrap();
-               let params = RouteParameters {
-                       payment_params,
-                       final_value_msat: invoice.amount_milli_satoshis().unwrap(),
-               };
+               let params = RouteParameters::from_payment_params_and_value(
+                       payment_params, invoice.amount_milli_satoshis().unwrap());
                let (payment_event, fwd_idx) = {
                        let mut payment_hash = PaymentHash([0; 32]);
                        payment_hash.0.copy_from_slice(&invoice.payment_hash().as_ref()[0..32]);
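Outside of these tests the new constructor is used the same way. A hedged, standalone sketch (the payee key, CLTV delta, and amount are placeholders):

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::routing::router::{PaymentParameters, RouteParameters};

// Builds route parameters for a payee identified by node id; values are illustrative only.
fn build_params(payee: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32) -> RouteParameters {
    let payment_params = PaymentParameters::from_node_id(payee, final_cltv_expiry_delta);
    // New constructor taking the payment parameters plus the final value in millisatoshis,
    // replacing the old `RouteParameters { payment_params, final_value_msat }` literal.
    RouteParameters::from_payment_params_and_value(payment_params, amount_msat)
}
```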
index c2befdae51cbf5d44d22014cb364c4fde39a6661..d2ac6e5474543db1392926dc8dfebf07715b0d86 100644
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] }
+tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
 lightning = { version = "0.0.116", path = "../lightning", features = ["_test_utils"] }
index d6d8004164bf9ab54db94ee64b55ef63f5f57e74..5527d85adf6f819e0b02e3ad05e18cc7c4426ae6 100644
 use bitcoin::secp256k1::PublicKey;
 
 use tokio::net::TcpStream;
-use tokio::{io, time};
+use tokio::time;
 use tokio::sync::mpsc;
-use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::APeerManager;
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
 
 use std::ops::Deref;
 use std::task::{self, Poll};
@@ -59,7 +58,7 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 // define a trivial two- and three- select macro with the specific types we need and just use that.
 
 pub(crate) enum SelectorOutput {
-       A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
+       A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
 }
 
 pub(crate) struct TwoSelector<
@@ -87,7 +86,7 @@ impl<
 }
 
 pub(crate) struct ThreeSelector<
-       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
 > {
        pub a: A,
        pub b: B,
@@ -95,7 +94,7 @@ pub(crate) struct ThreeSelector<
 }
 
 impl<
-       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
 > Future for ThreeSelector<A, B, C> {
        type Output = SelectorOutput;
        fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
@@ -119,7 +118,7 @@ impl<
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
 struct Connection {
-       writer: Option<io::WriteHalf<TcpStream>>,
+       writer: Option<Arc<TcpStream>>,
        // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
        // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
        // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
@@ -156,7 +155,7 @@ impl Connection {
        async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
                peer_manager: PM,
                us: Arc<Mutex<Self>>,
-               mut reader: io::ReadHalf<TcpStream>,
+               reader: Arc<TcpStream>,
                mut read_wake_receiver: mpsc::Receiver<()>,
                mut write_avail_receiver: mpsc::Receiver<()>,
        ) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
@@ -200,7 +199,7 @@ impl Connection {
                                ThreeSelector {
                                        a: Box::pin(write_avail_receiver.recv()),
                                        b: Box::pin(read_wake_receiver.recv()),
-                                       c: Box::pin(reader.read(&mut buf)),
+                                       c: Box::pin(reader.readable()),
                                }.await
                        };
                        match select_result {
@@ -211,8 +210,9 @@ impl Connection {
                                        }
                                },
                                SelectorOutput::B(_) => {},
-                               SelectorOutput::C(read) => {
-                                       match read {
+                               SelectorOutput::C(res) => {
+                                       if res.is_err() { break Disconnect::PeerDisconnected; }
+                                       match reader.try_read(&mut buf) {
                                                Ok(0) => break Disconnect::PeerDisconnected,
                                                Ok(len) => {
                                                        let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
@@ -226,6 +226,10 @@ impl Connection {
                                                                Err(_) => break Disconnect::CloseConnection,
                                                        }
                                                },
+                                               Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+                                                       // readable() is allowed to spuriously wake, so we have to handle
+                                                       // WouldBlock here.
+                                               },
                                                Err(_) => break Disconnect::PeerDisconnected,
                                        }
                                },
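The `readable()` plus `try_read()` combination above is tokio's readiness-based read pattern. A stripped-down sketch of the same pattern on a bare `TcpStream`, with the identical `WouldBlock` handling (error handling simplified for illustration):

```rust
use tokio::net::TcpStream;

// Wait for readability, then try_read; a WouldBlock error just means the readiness
// notification was spurious, so we loop and wait again.
async fn read_once(stream: &TcpStream, buf: &mut [u8]) -> std::io::Result<usize> {
    loop {
        stream.readable().await?;
        match stream.try_read(buf) {
            Ok(n) => return Ok(n), // n == 0 means the peer closed the connection.
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
}
```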
@@ -239,18 +243,14 @@ impl Connection {
                        // here.
                        let _ = tokio::task::yield_now().await;
                };
-               let writer_option = us.lock().unwrap().writer.take();
-               if let Some(mut writer) = writer_option {
-                       // If the socket is already closed, shutdown() will fail, so just ignore it.
-                       let _ = writer.shutdown().await;
-               }
+               us.lock().unwrap().writer.take();
                if let Disconnect::PeerDisconnected = disconnect_type {
                        peer_manager.as_ref().socket_disconnected(&our_descriptor);
                        peer_manager.as_ref().process_events();
                }
        }
 
-       fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+       fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
                // We only ever need a channel of depth 1 here: if we returned a non-full write to the
                // PeerManager, we will eventually get notified that there is room in the socket to write
                // new bytes, which will generate an event. That event will be popped off the queue before
@@ -262,24 +262,24 @@ impl Connection {
                // false.
                let (read_waker, read_receiver) = mpsc::channel(1);
                stream.set_nonblocking(true).unwrap();
-               let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
+               let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap());
 
-               (reader, write_receiver, read_receiver,
+               (Arc::clone(&tokio_stream), write_receiver, read_receiver,
                Arc::new(Mutex::new(Self {
-                       writer: Some(writer), write_avail, read_waker, read_paused: false,
+                       writer: Some(tokio_stream), write_avail, read_waker, read_paused: false,
                        rl_requested_disconnect: false,
                        id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
                })))
        }
 }
 
-fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
        match stream.peer_addr() {
-               Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+               Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
                        addr: sockaddr.ip().octets(),
                        port: sockaddr.port(),
                }),
-               Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+               Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
                        addr: sockaddr.ip().octets(),
                        port: sockaddr.port(),
                }),
@@ -462,9 +462,9 @@ impl SocketDescriptor {
 }
 impl peer_handler::SocketDescriptor for SocketDescriptor {
        fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
-               // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
-               // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
-               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
+               // To send data, we take a lock on our Connection to access the TcpStream, writing to it if
+               // there's room in the kernel buffer, or otherwise create a new Waker with a
+               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
                // processing future which will call write_buffer_space_avail and we'll end up back here.
                let mut us = self.conn.lock().unwrap();
                if us.writer.is_none() {
@@ -484,24 +484,18 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
                let mut ctx = task::Context::from_waker(&waker);
                let mut written_len = 0;
                loop {
-                       match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
-                               task::Poll::Ready(Ok(res)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(res, 0);
-                                       written_len += res;
-                                       if written_len == data.len() { return written_len; }
-                               },
-                               task::Poll::Ready(Err(e)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
-                                       // Probably we've already been closed, just return what we have and let the
-                                       // read thread handle closing logic.
-                                       return written_len;
+                       match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) {
+                               task::Poll::Ready(Ok(())) => {
+                                       match us.writer.as_ref().unwrap().try_write(&data[written_len..]) {
+                                               Ok(res) => {
+                                                       debug_assert_ne!(res, 0);
+                                                       written_len += res;
+                                                       if written_len == data.len() { return written_len; }
+                                               },
+                                               Err(_) => return written_len,
+                                       }
                                },
+                               task::Poll::Ready(Err(_)) => return written_len,
                                task::Poll::Pending => {
                                        // We're queued up for a write event now, but we need to make sure we also
                                        // pause read given we're now waiting on the remote end to ACK (and in
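The `send_data` change above uses `poll_write_ready` followed by `try_write` because it runs from a synchronous context. For illustration, the async analog of that readiness-based write pattern on a bare tokio `TcpStream` might look like this sketch (not the code used by the descriptor itself):

```rust
use tokio::net::TcpStream;

// Wait until the socket is writable, then try_write as much as the kernel buffer
// accepts, treating WouldBlock as a spurious wakeup.
async fn write_all(stream: &TcpStream, mut data: &[u8]) -> std::io::Result<()> {
    while !data.is_empty() {
        stream.writable().await?;
        match stream.try_write(data) {
            Ok(n) => data = &data[n..],
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
            Err(e) => return Err(e),
        }
    }
    Ok(())
}
```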
index 35bddc0774604fcd653e084946d06477d01ad239..361ab0c57a1d13fd9ee39543534c0bcbbbac4ccf 100644
@@ -3,9 +3,9 @@ name = "lightning-persister"
 version = "0.0.116"
 authors = ["Valentine Wallace", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
-repository = "https://github.com/lightningdevkit/rust-lightning/"
+repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
-Utilities to manage Rust-Lightning channel data persistence and retrieval.
+Utilities for LDK data persistence and retrieval.
 """
 edition = "2018"
 
@@ -16,13 +16,13 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116", path = "../lightning" }
-libc = "0.2"
 
 [target.'cfg(windows)'.dependencies]
-winapi = { version = "0.3", features = ["winbase"] }
+windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
 
 [target.'cfg(ldk_bench)'.dependencies]
 criterion = { version = "0.4", optional = true, default-features = false }
 
 [dev-dependencies]
 lightning = { version = "0.0.116", path = "../lightning", features = ["_test_utils"] }
+bitcoin = { version = "0.29.0", default-features = false }
diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
new file mode 100644
index 0000000..81f9709
--- /dev/null
@@ -0,0 +1,531 @@
+//! Objects related to [`FilesystemStore`] live here.
+use crate::utils::{check_namespace_key_validity, is_valid_kvstore_str};
+
+use lightning::util::persist::KVStore;
+use lightning::util::string::PrintableString;
+
+use std::collections::HashMap;
+use std::fs;
+use std::io::{Read, Write};
+use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex, RwLock};
+
+#[cfg(target_os = "windows")]
+use {std::ffi::OsStr, std::os::windows::ffi::OsStrExt};
+
+#[cfg(target_os = "windows")]
+macro_rules! call {
+       ($e: expr) => {
+               if $e != 0 {
+                       Ok(())
+               } else {
+                       Err(std::io::Error::last_os_error())
+               }
+       };
+}
+
+#[cfg(target_os = "windows")]
+fn path_to_windows_str<T: AsRef<OsStr>>(path: &T) -> Vec<u16> {
+       path.as_ref().encode_wide().chain(Some(0)).collect()
+}
+
+// The number of read/write/remove/list operations after which we clean up our `locks` HashMap.
+const GC_LOCK_INTERVAL: usize = 25;
+
+/// A [`KVStore`] implementation that writes to and reads from the file system.
+pub struct FilesystemStore {
+       data_dir: PathBuf,
+       tmp_file_counter: AtomicUsize,
+       gc_counter: AtomicUsize,
+       locks: Mutex<HashMap<PathBuf, Arc<RwLock<()>>>>,
+}
+
+impl FilesystemStore {
+       /// Constructs a new [`FilesystemStore`].
+       pub fn new(data_dir: PathBuf) -> Self {
+               let locks = Mutex::new(HashMap::new());
+               let tmp_file_counter = AtomicUsize::new(0);
+               let gc_counter = AtomicUsize::new(1);
+               Self { data_dir, tmp_file_counter, gc_counter, locks }
+       }
+
+       /// Returns the data directory.
+       pub fn get_data_dir(&self) -> PathBuf {
+               self.data_dir.clone()
+       }
+
+       fn garbage_collect_locks(&self) {
+               let gc_counter = self.gc_counter.fetch_add(1, Ordering::AcqRel);
+
+               if gc_counter % GC_LOCK_INTERVAL == 0 {
+                       // Take outer lock for the cleanup.
+                       let mut outer_lock = self.locks.lock().unwrap();
+
+                       // Garbage collect all lock entries that are not referenced anymore.
+                       outer_lock.retain(|_, v| Arc::strong_count(&v) > 1);
+               }
+       }
+
+       fn get_dest_dir_path(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<PathBuf> {
+               let mut dest_dir_path = {
+                       #[cfg(target_os = "windows")]
+                       {
+                               let data_dir = self.data_dir.clone();
+                               fs::create_dir_all(data_dir.clone())?;
+                               fs::canonicalize(data_dir)?
+                       }
+                       #[cfg(not(target_os = "windows"))]
+                       {
+                               self.data_dir.clone()
+                       }
+               };
+
+               dest_dir_path.push(namespace);
+               if !sub_namespace.is_empty() {
+                       dest_dir_path.push(sub_namespace);
+               }
+
+               Ok(dest_dir_path)
+       }
+}
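Given `get_dest_dir_path`, a value stored under `(namespace, sub_namespace, key)` lands at `<data_dir>/<namespace>[/<sub_namespace>]/<key>`. A hedged end-to-end usage sketch (the data directory and the namespace/key names below are arbitrary examples):

```rust
use std::path::PathBuf;

use lightning::util::persist::KVStore;
use lightning_persister::fs_store::FilesystemStore;

fn main() -> std::io::Result<()> {
    // Arbitrary example directory; any writable path works.
    let store = FilesystemStore::new(PathBuf::from("/tmp/ldk_store_example"));

    // Ends up on disk as /tmp/ldk_store_example/example_ns/example_sub/example_key.
    store.write("example_ns", "example_sub", "example_key", b"hello")?;
    assert_eq!(store.read("example_ns", "example_sub", "example_key")?, b"hello".to_vec());
    assert_eq!(store.list("example_ns", "example_sub")?, vec!["example_key".to_string()]);
    store.remove("example_ns", "example_sub", "example_key", false)?;
    Ok(())
}
```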
+
+impl KVStore for FilesystemStore {
+       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> std::io::Result<Vec<u8>> {
+               check_namespace_key_validity(namespace, sub_namespace, Some(key), "read")?;
+
+               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               dest_file_path.push(key);
+
+               let mut buf = Vec::new();
+               {
+                       let inner_lock_ref = {
+                               let mut outer_lock = self.locks.lock().unwrap();
+                               Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+                       };
+                       let _guard = inner_lock_ref.read().unwrap();
+
+                       let mut f = fs::File::open(dest_file_path)?;
+                       f.read_to_end(&mut buf)?;
+               }
+
+               self.garbage_collect_locks();
+
+               Ok(buf)
+       }
+
+       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> std::io::Result<()> {
+               check_namespace_key_validity(namespace, sub_namespace, Some(key), "write")?;
+
+               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               dest_file_path.push(key);
+
+               let parent_directory = dest_file_path
+                       .parent()
+                       .ok_or_else(|| {
+                               let msg =
+                                       format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+                               std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
+                       })?;
+               fs::create_dir_all(&parent_directory)?;
+
+               // Do a crazy dance with lots of fsync()s to be overly cautious here...
+               // We never want to end up in a state where we've lost the old data, or end up using the
+               // old data on power loss after we've returned.
+               // The way to atomically write a file on Unix platforms is:
+               // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
+               let mut tmp_file_path = dest_file_path.clone();
+               let tmp_file_ext = format!("{}.tmp", self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+               tmp_file_path.set_extension(tmp_file_ext);
+
+               {
+                       let mut tmp_file = fs::File::create(&tmp_file_path)?;
+                       tmp_file.write_all(&buf)?;
+                       tmp_file.sync_all()?;
+               }
+
+               let res = {
+                       let inner_lock_ref = {
+                               let mut outer_lock = self.locks.lock().unwrap();
+                               Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+                       };
+                       let _guard = inner_lock_ref.write().unwrap();
+
+                       #[cfg(not(target_os = "windows"))]
+                       {
+                               fs::rename(&tmp_file_path, &dest_file_path)?;
+                               let dir_file = fs::OpenOptions::new().read(true).open(&parent_directory)?;
+                               dir_file.sync_all()?;
+                               Ok(())
+                       }
+
+                       #[cfg(target_os = "windows")]
+                       {
+                               let res = if dest_file_path.exists() {
+                                       call!(unsafe {
+                                               windows_sys::Win32::Storage::FileSystem::ReplaceFileW(
+                                                       path_to_windows_str(&dest_file_path).as_ptr(),
+                                                       path_to_windows_str(&tmp_file_path).as_ptr(),
+                                                       std::ptr::null(),
+                                                       windows_sys::Win32::Storage::FileSystem::REPLACEFILE_IGNORE_MERGE_ERRORS,
+                                                       std::ptr::null_mut() as *const core::ffi::c_void,
+                                                       std::ptr::null_mut() as *const core::ffi::c_void,
+                                                       )
+                                       })
+                               } else {
+                                       call!(unsafe {
+                                               windows_sys::Win32::Storage::FileSystem::MoveFileExW(
+                                                       path_to_windows_str(&tmp_file_path).as_ptr(),
+                                                       path_to_windows_str(&dest_file_path).as_ptr(),
+                                                       windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
+                                                       | windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
+                                                       )
+                                       })
+                               };
+
+                               match res {
+                                       Ok(()) => {
+                                               // We fsync the dest file in hopes this will also flush the metadata to disk.
+                                               let dest_file = fs::OpenOptions::new().read(true).write(true)
+                                                       .open(&dest_file_path)?;
+                                               dest_file.sync_all()?;
+                                               Ok(())
+                                       }
+                                       Err(e) => Err(e),
+                               }
+                       }
+               };
+
+               self.garbage_collect_locks();
+
+               res
+       }
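The write path above follows the usual crash-safe recipe: write and fsync a temporary file, rename it over the destination, then fsync the parent directory so the rename itself is durable. Stripped of the per-path locking and the Windows branch, the same recipe in plain `std::fs` looks roughly like this sketch (Unix-only assumptions; not the exact code above):

```rust
use std::fs;
use std::io::Write;
use std::path::Path;

// Write `buf` to `dest` so a crash leaves either the old or the new contents,
// never a torn file.
fn atomic_write(dest: &Path, buf: &[u8]) -> std::io::Result<()> {
    let parent = dest.parent().ok_or_else(|| std::io::Error::new(
        std::io::ErrorKind::InvalidInput, "destination has no parent directory"))?;
    let tmp = dest.with_extension("tmp");

    let mut f = fs::File::create(&tmp)?;
    f.write_all(buf)?;
    f.sync_all()?; // Flush the new contents before the rename makes them visible.
    drop(f);

    fs::rename(&tmp, dest)?;
    // Persist the directory entry update so the rename survives power loss.
    fs::File::open(parent)?.sync_all()?;
    Ok(())
}
```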
+
+       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> std::io::Result<()> {
+               check_namespace_key_validity(namespace, sub_namespace, Some(key), "remove")?;
+
+               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               dest_file_path.push(key);
+
+               if !dest_file_path.is_file() {
+                       return Ok(());
+               }
+
+               {
+                       let inner_lock_ref = {
+                               let mut outer_lock = self.locks.lock().unwrap();
+                               Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+                       };
+                       let _guard = inner_lock_ref.write().unwrap();
+
+                       if lazy {
+                               // If we're lazy we just call remove and be done with it.
+                               fs::remove_file(&dest_file_path)?;
+                       } else {
+                               // If we're not lazy we try our best to persist the updated metadata to ensure
+                               // atomicity of this call.
+                               #[cfg(not(target_os = "windows"))]
+                               {
+                                       fs::remove_file(&dest_file_path)?;
+
+                                       let parent_directory = dest_file_path.parent().ok_or_else(|| {
+                                               let msg =
+                                                       format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+                                               std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
+                                       })?;
+                                       let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
+                                       // The above call to `fs::remove_file` corresponds to POSIX `unlink`, whose changes
+                                       // to the inode might get cached (and hence possibly lost on crash), depending on
+                                       // the target platform and file system.
+                                       //
+                                       // In order to assert we permanently removed the file in question we therefore
+                                       // call `fsync` on the parent directory on platforms that support it.
+                                       dir_file.sync_all()?;
+                               }
+
+                               #[cfg(target_os = "windows")]
+                               {
+                                       // Since Windows `DeleteFile` API is not persisted until the last open file handle
+                                       // is dropped, and there seemingly is no reliable way to flush the directory
+                                       // metadata, we here fall back to use a 'recycling bin' model, i.e., first move the
+                                       // file to be deleted to a temporary trash file and remove the latter file
+                                       // afterwards.
+                                       //
+                                       // This should be marginally better, as, according to the documentation,
+                                       // `MoveFileExW` APIs should offer stronger persistence guarantees,
+                                       // at least if `MOVEFILE_WRITE_THROUGH`/`MOVEFILE_REPLACE_EXISTING` is set.
+                                       // However, all this is partially based on assumptions and local experiments, as
+                                       // Windows API is horribly underdocumented.
+                                       let mut trash_file_path = dest_file_path.clone();
+                                       let trash_file_ext = format!("{}.trash",
+                                               self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+                                       trash_file_path.set_extension(trash_file_ext);
+
+                                       call!(unsafe {
+                                               windows_sys::Win32::Storage::FileSystem::MoveFileExW(
+                                                       path_to_windows_str(&dest_file_path).as_ptr(),
+                                                       path_to_windows_str(&trash_file_path).as_ptr(),
+                                                       windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
+                                                       | windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
+                                                       )
+                                       })?;
+
+                                       {
+                                               // We fsync the trash file in hopes this will also flush the original's file
+                                               // metadata to disk.
+                                               let trash_file = fs::OpenOptions::new().read(true).write(true)
+                                                       .open(&trash_file_path.clone())?;
+                                               trash_file.sync_all()?;
+                                       }
+
+                                       // We're fine if this removal fails, as the trash file will eventually be cleaned
+                                       // up by `list`.
+                                       fs::remove_file(trash_file_path).ok();
+                               }
+                       }
+               }
+
+               self.garbage_collect_locks();
+
+               Ok(())
+       }
+
+       fn list(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<Vec<String>> {
+               check_namespace_key_validity(namespace, sub_namespace, None, "list")?;
+
+               let prefixed_dest = self.get_dest_dir_path(namespace, sub_namespace)?;
+               let mut keys = Vec::new();
+
+               if !Path::new(&prefixed_dest).exists() {
+                       return Ok(Vec::new());
+               }
+
+               for entry in fs::read_dir(&prefixed_dest)? {
+                       let entry = entry?;
+                       let p = entry.path();
+
+                       if let Some(ext) = p.extension() {
+                               #[cfg(target_os = "windows")]
+                               {
+                                       // Clean up any trash files lying around.
+                                       if ext == "trash" {
+                                               fs::remove_file(p).ok();
+                                               continue;
+                                       }
+                               }
+                               if ext == "tmp" {
+                                       continue;
+                               }
+                       }
+
+                       let metadata = p.metadata()?;
+
+                       // We allow the presence of directories in the empty namespace and just skip them.
+                       if metadata.is_dir() {
+                               continue;
+                       }
+
+                       // If we otherwise don't find a file at the given path, something went wrong.
+                       if !metadata.is_file() {
+                               debug_assert!(false, "Failed to list keys of {}/{}: file couldn't be accessed.",
+                                       PrintableString(namespace), PrintableString(sub_namespace));
+                               let msg = format!("Failed to list keys of {}/{}: file couldn't be accessed.",
+                                       PrintableString(namespace), PrintableString(sub_namespace));
+                               return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+                       }
+
+                       match p.strip_prefix(&prefixed_dest) {
+                               Ok(stripped_path) => {
+                                       if let Some(relative_path) = stripped_path.to_str() {
+                                               if is_valid_kvstore_str(relative_path) {
+                                                       keys.push(relative_path.to_string())
+                                               }
+                                       } else {
+                                               debug_assert!(false, "Failed to list keys of {}/{}: file path is not valid UTF-8",
+                                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                               let msg = format!("Failed to list keys of {}/{}: file path is not valid UTF-8",
+                                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                               return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+                                       }
+                               }
+                               Err(e) => {
+                                       debug_assert!(false, "Failed to list keys of {}/{}: {}",
+                                               PrintableString(namespace), PrintableString(sub_namespace), e);
+                                       let msg = format!("Failed to list keys of {}/{}: {}",
+                                               PrintableString(namespace), PrintableString(sub_namespace), e);
+                                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+                               }
+                       }
+               }
+
+               self.garbage_collect_locks();
+
+               Ok(keys)
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use super::*;
+       use crate::test_utils::{do_read_write_remove_list_persist, do_test_store};
+
+       use bitcoin::hashes::hex::FromHex;
+       use bitcoin::Txid;
+
+       use lightning::chain::ChannelMonitorUpdateStatus;
+       use lightning::chain::chainmonitor::Persist;
+       use lightning::chain::transaction::OutPoint;
+       use lightning::check_closed_event;
+       use lightning::events::{ClosureReason, MessageSendEventsProvider};
+       use lightning::ln::functional_test_utils::*;
+       use lightning::util::test_utils;
+       use lightning::util::persist::read_channel_monitors;
+       use std::fs;
+       #[cfg(target_os = "windows")]
+       use {
+               lightning::get_event_msg,
+               lightning::ln::msgs::ChannelMessageHandler,
+       };
+
+       impl Drop for FilesystemStore {
+               fn drop(&mut self) {
+                       // We test for invalid directory names, so it's OK if directory removal
+                       // fails.
+                       match fs::remove_dir_all(&self.data_dir) {
+                               Err(e) => println!("Failed to remove test persister directory: {}", e),
+                               _ => {}
+                       }
+               }
+       }
+
+       #[test]
+       fn read_write_remove_list_persist() {
+               let mut temp_path = std::env::temp_dir();
+               temp_path.push("test_read_write_remove_list_persist");
+               let fs_store = FilesystemStore::new(temp_path);
+               do_read_write_remove_list_persist(&fs_store);
+       }
+
+       #[test]
+       fn test_if_monitors_is_not_dir() {
+               let store = FilesystemStore::new("test_monitors_is_not_dir".into());
+
+               fs::create_dir_all(&store.get_data_dir()).unwrap();
+               let mut path = std::path::PathBuf::from(&store.get_data_dir());
+               path.push("monitors");
+               fs::File::create(path).unwrap();
+
+               let chanmon_cfgs = create_chanmon_cfgs(1);
+               let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
+               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &store, node_cfgs[0].keys_manager);
+               node_cfgs[0].chain_monitor = chain_mon_0;
+               let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
+               let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
+
+               // Check that read_channel_monitors() returns an error if monitors/ is not a
+               // directory.
+               assert!(read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err());
+       }
+
+       #[test]
+       fn test_filesystem_store() {
+               // Create the nodes, giving them FilesystemStores for data stores.
+               let store_0 = FilesystemStore::new("test_filesystem_store_0".into());
+               let store_1 = FilesystemStore::new("test_filesystem_store_1".into());
+               do_test_store(&store_0, &store_1)
+       }
+
+       // Test that if the store's path to channel data is read-only, writing a
+       // monitor to it results in the store returning a PermanentFailure.
+       // Windows ignores the read-only flag for folders, so this test is Unix-only.
+       #[cfg(not(target_os = "windows"))]
+       #[test]
+       fn test_readonly_dir_perm_failure() {
+               let store = FilesystemStore::new("test_readonly_dir_perm_failure".into());
+               fs::create_dir_all(&store.get_data_dir()).unwrap();
+
+               // Set up a dummy channel and force close. This will produce a monitor
+               // that we can then use to test persistence.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+
+               // Set the store's directory to read-only, which should result in a
+               // permanent failure when we then attempt to persist a channel
+               // update.
+               let path = &store.get_data_dir();
+               let mut perms = fs::metadata(path).unwrap().permissions();
+               perms.set_readonly(true);
+               fs::set_permissions(path, perms).unwrap();
+
+               let test_txo = OutPoint {
+                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
+                       index: 0
+               };
+               match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
+                       _ => panic!("unexpected result from persisting new channel")
+               }
+
+               nodes[1].node.get_and_clear_pending_msg_events();
+               added_monitors.clear();
+       }
+
+       // Test that if a store's directory name is invalid, monitor persistence
+       // will fail.
+       #[cfg(target_os = "windows")]
+       #[test]
+       fn test_fail_on_open() {
+               // Set up a dummy channel and force close. This will produce a monitor
+               // that we can then use to test persistence.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+
+               // Create the store with an invalid directory name and test that the
+               // channel fails to open because the directories fail to be created. There
+               // don't seem to be any invalid filename characters on Unix that Rust doesn't
+               // handle, which is why this test is Windows-only.
+               let store = FilesystemStore::new(":<>/".into());
+
+               let test_txo = OutPoint {
+                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
+                       index: 0
+               };
+               match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
+                       _ => panic!("unexpected result from persisting new channel")
+               }
+
+               nodes[1].node.get_and_clear_pending_msg_events();
+               added_monitors.clear();
+       }
+}
+
+#[cfg(ldk_bench)]
+/// Benches
+pub mod bench {
+       use criterion::Criterion;
+
+       /// Bench!
+       pub fn bench_sends(bench: &mut Criterion) {
+               let store_a = super::FilesystemStore::new("bench_filesystem_store_a".into());
+               let store_b = super::FilesystemStore::new("bench_filesystem_store_b".into());
+               lightning::ln::channelmanager::bench::bench_two_sends(
+                       bench, "bench_filesystem_persisted_sends", store_a, store_b);
+       }
+}
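
For reference, the `FilesystemStore` above is addressed through the generic `KVStore` interface (namespace/sub-namespace/key), as exercised by the test helpers later in this diff. Below is a minimal usage sketch, assuming the `write`/`read`/`list`/`remove` signatures shown in those helpers; the base directory, namespace, and key names are illustrative only:

        use std::path::PathBuf;

        use lightning::util::persist::KVStore;
        use lightning_persister::fs_store::FilesystemStore;

        fn main() -> std::io::Result<()> {
                // Values are stored in per-(sub-)namespace directories under the given base directory.
                let store = FilesystemStore::new(PathBuf::from("ldk_store_example"));

                // Write a value and read it back.
                store.write("monitors", "", "testkey", &[42u8; 32])?;
                let read_back = store.read("monitors", "", "testkey")?;
                assert_eq!(&read_back[..], &[42u8; 32][..]);

                // List the keys in the namespace, then remove the entry eagerly (`lazy` = false).
                assert_eq!(store.list("monitors", "")?, vec!["testkey".to_string()]);
                store.remove("monitors", "", "testkey", false)?;
                Ok(())
        }
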
index b34fe895b47e7df0a707db63b2225a1120de523b..ae258e137d742f32ce1067d6508ad133e02bd67a 100644 (file)
@@ -1,6 +1,6 @@
-//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
-
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+//! Provides utilities for LDK data persistence and retrieval.
+//
+// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
 #![deny(private_intra_doc_links)]
 
 
 #[cfg(ldk_bench)] extern crate criterion;
 
-mod util;
-
-extern crate lightning;
-extern crate bitcoin;
-extern crate libc;
-
-use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::hashes::hex::FromHex;
-use lightning::chain::channelmonitor::ChannelMonitor;
-use lightning::sign::{EntropySource, SignerProvider};
-use lightning::util::ser::{ReadableArgs, Writeable};
-use lightning::util::persist::KVStorePersister;
-use std::fs;
-use std::io::Cursor;
-use std::ops::Deref;
-use std::path::{Path, PathBuf};
-
-/// FilesystemPersister persists channel data on disk, where each channel's
-/// data is stored in a file named after its funding outpoint.
-///
-/// Warning: this module does the best it can with calls to persist data, but it
-/// can only guarantee that the data is passed to the drive. It is up to the
-/// drive manufacturers to do the actual persistence properly, which they often
-/// don't (especially on consumer-grade hardware). Therefore, it is up to the
-/// user to validate their entire storage stack, to ensure the writes are
-/// persistent.
-/// Corollary: especially when dealing with larger amounts of money, it is best
-/// practice to have multiple channel data backups and not rely only on one
-/// FilesystemPersister.
-pub struct FilesystemPersister {
-       path_to_channel_data: String,
-}
-
-impl FilesystemPersister {
-       /// Initialize a new FilesystemPersister and set the path to the individual channels'
-       /// files.
-       pub fn new(path_to_channel_data: String) -> Self {
-               Self {
-                       path_to_channel_data,
-               }
-       }
-
-       /// Get the directory which was provided when this persister was initialized.
-       pub fn get_data_dir(&self) -> String {
-               self.path_to_channel_data.clone()
-       }
-
-       /// Read `ChannelMonitor`s from disk.
-       pub fn read_channelmonitors<ES: Deref, SP: Deref> (
-               &self, entropy_source: ES, signer_provider: SP
-       ) -> std::io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
-               where
-                       ES::Target: EntropySource + Sized,
-                       SP::Target: SignerProvider + Sized
-       {
-               let mut path = PathBuf::from(&self.path_to_channel_data);
-               path.push("monitors");
-               if !Path::new(&path).exists() {
-                       return Ok(Vec::new());
-               }
-               let mut res = Vec::new();
-               for file_option in fs::read_dir(path)? {
-                       let file = file_option.unwrap();
-                       let owned_file_name = file.file_name();
-                       let filename = owned_file_name.to_str()
-                               .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidData,
-                                       "File name is not a valid utf8 string"))?;
-                       if !filename.is_ascii() || filename.len() < 65 {
-                               return Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid ChannelMonitor file name",
-                               ));
-                       }
-                       if filename.ends_with(".tmp") {
-                               // If we were in the middle of committing an new update and crashed, it should be
-                               // safe to ignore the update - we should never have returned to the caller and
-                               // irrevocably committed to the new state in any way.
-                               continue;
-                       }
-
-                       let txid: Txid = Txid::from_hex(filename.split_at(64).0)
-                               .map_err(|_| std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid tx ID in filename",
-                               ))?;
-
-                       let index: u16 = filename.split_at(65).1.parse()
-                               .map_err(|_| std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid tx index in filename",
-                               ))?;
+pub mod fs_store;
 
-                       let contents = fs::read(&file.path())?;
-                       let mut buffer = Cursor::new(&contents);
-                       match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(&mut buffer, (&*entropy_source, &*signer_provider)) {
-                               Ok((blockhash, channel_monitor)) => {
-                                       if channel_monitor.get_funding_txo().0.txid != txid || channel_monitor.get_funding_txo().0.index != index {
-                                               return Err(std::io::Error::new(std::io::ErrorKind::InvalidData,
-                                                                              "ChannelMonitor was stored in the wrong file"));
-                                       }
-                                       res.push((blockhash, channel_monitor));
-                               }
-                               Err(e) => return Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       format!("Failed to deserialize ChannelMonitor: {}", e),
-                               ))
-                       }
-               }
-               Ok(res)
-       }
-}
-
-impl KVStorePersister for FilesystemPersister {
-       fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
-               let mut dest_file = PathBuf::from(self.path_to_channel_data.clone());
-               dest_file.push(key);
-               util::write_to_file(dest_file, object)
-       }
-}
+mod utils;
 
 #[cfg(test)]
-mod tests {
-       extern crate lightning;
-       extern crate bitcoin;
-       use crate::FilesystemPersister;
-       use bitcoin::hashes::hex::FromHex;
-       use bitcoin::Txid;
-       use lightning::chain::ChannelMonitorUpdateStatus;
-       use lightning::chain::chainmonitor::Persist;
-       use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
-       use lightning::chain::transaction::OutPoint;
-       use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
-       use lightning::events::{ClosureReason, MessageSendEventsProvider};
-       use lightning::ln::functional_test_utils::*;
-       use lightning::util::test_utils;
-       use std::fs;
-       #[cfg(target_os = "windows")]
-       use {
-               lightning::get_event_msg,
-               lightning::ln::msgs::ChannelMessageHandler,
-       };
-
-       impl Drop for FilesystemPersister {
-               fn drop(&mut self) {
-                       // We test for invalid directory names, so it's OK if directory removal
-                       // fails.
-                       match fs::remove_dir_all(&self.path_to_channel_data) {
-                               Err(e) => println!("Failed to remove test persister directory: {}", e),
-                               _ => {}
-                       }
-               }
-       }
-
-       #[test]
-       fn test_if_monitors_is_not_dir() {
-               let persister = FilesystemPersister::new("test_monitors_is_not_dir".to_string());
-
-               fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-               let mut path = std::path::PathBuf::from(&persister.path_to_channel_data);
-               path.push("monitors");
-               fs::File::create(path).unwrap();
-
-               let chanmon_cfgs = create_chanmon_cfgs(1);
-               let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
-               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, node_cfgs[0].keys_manager);
-               node_cfgs[0].chain_monitor = chain_mon_0;
-               let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
-               let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
-
-               // Check that read_channelmonitors() returns error if monitors/ is not a
-               // directory.
-               assert!(persister.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).is_err());
-       }
-
-       // Integration-test the FilesystemPersister. Test relaying a few payments
-       // and check that the persisted data is updated the appropriate number of
-       // times.
-       #[test]
-       fn test_filesystem_persister() {
-               // Create the nodes, giving them FilesystemPersisters for data persisters.
-               let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
-               let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, node_cfgs[0].keys_manager);
-               let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, node_cfgs[1].keys_manager);
-               node_cfgs[0].chain_monitor = chain_mon_0;
-               node_cfgs[1].chain_monitor = chain_mon_1;
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-               // Check that the persisted channel data is empty before any channels are
-               // open.
-               let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_0.len(), 0);
-               let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_1.len(), 0);
-
-               // Helper to make sure the channel is on the expected update ID.
-               macro_rules! check_persisted_data {
-                       ($expected_update_id: expr) => {
-                               persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_0.len(), 1);
-                               for (_, mon) in persisted_chan_data_0.iter() {
-                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                               }
-                               persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_1.len(), 1);
-                               for (_, mon) in persisted_chan_data_1.iter() {
-                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                               }
-                       }
-               }
-
-               // Create some initial channel and check that a channel was persisted.
-               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
-               check_persisted_data!(0);
-
-               // Send a few payments and make sure the monitors are updated to the latest.
-               send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-               check_persisted_data!(5);
-               send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
-               check_persisted_data!(10);
-
-               // Force close because cooperative close doesn't result in any persisted
-               // updates.
-               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
-               check_closed_broadcast!(nodes[0], true);
-               check_added_monitors!(nodes[0], 1);
-
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 1);
-
-               connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
-               check_closed_broadcast!(nodes[1], true);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
-               check_added_monitors!(nodes[1], 1);
-
-               // Make sure everything is persisted as expected after close.
-               check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // Test that if the persister's path to channel data is read-only, writing a
-       // monitor to it results in the persister returning a PermanentFailure.
-       // Windows ignores the read-only flag for folders, so this test is Unix-only.
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_readonly_dir_perm_failure() {
-               let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
-               fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-
-               // Set up a dummy channel and force close. This will produce a monitor
-               // that we can then use to test persistence.
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
-               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
-               // Set the persister's directory to read-only, which should result in
-               // returning a permanent failure when we then attempt to persist a
-               // channel update.
-               let path = &persister.path_to_channel_data;
-               let mut perms = fs::metadata(path).unwrap().permissions();
-               perms.set_readonly(true);
-               fs::set_permissions(path, perms).unwrap();
-
-               let test_txo = OutPoint {
-                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-                       index: 0
-               };
-               match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {},
-                       _ => panic!("unexpected result from persisting new channel")
-               }
-
-               nodes[1].node.get_and_clear_pending_msg_events();
-               added_monitors.clear();
-       }
-
-       // Test that if a persister's directory name is invalid, monitor persistence
-       // will fail.
-       #[cfg(target_os = "windows")]
-       #[test]
-       fn test_fail_on_open() {
-               // Set up a dummy channel and force close. This will produce a monitor
-               // that we can then use to test persistence.
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
-               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
-               // Create the persister with an invalid directory name and test that the
-               // channel fails to open because the directories fail to be created. There
-               // don't seem to be invalid filename characters on Unix that Rust doesn't
-               // handle, hence why the test is Windows-only.
-               let persister = FilesystemPersister::new(":<>/".to_string());
-
-               let test_txo = OutPoint {
-                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-                       index: 0
-               };
-               match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {},
-                       _ => panic!("unexpected result from persisting new channel")
-               }
-
-               nodes[1].node.get_and_clear_pending_msg_events();
-               added_monitors.clear();
-       }
-}
-
-#[cfg(ldk_bench)]
-/// Benches
-pub mod bench {
-       use criterion::Criterion;
-
-       /// Bench!
-       pub fn bench_sends(bench: &mut Criterion) {
-               let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
-               let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
-               lightning::ln::channelmanager::bench::bench_two_sends(
-                       bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
-       }
-}
+mod test_utils;
diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs
new file mode 100644 (file)
index 0000000..9155750
--- /dev/null
@@ -0,0 +1,121 @@
+use lightning::util::persist::{KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN, read_channel_monitors};
+use lightning::ln::functional_test_utils::{connect_block, create_announced_chan_between_nodes,
+       create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs,
+       send_payment};
+use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
+use lightning::util::test_utils;
+use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
+use lightning::events::ClosureReason;
+
+use std::panic::RefUnwindSafe;
+
+pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_store: &K) {
+       let data = [42u8; 32];
+
+       let namespace = "testspace";
+       let sub_namespace = "testsubspace";
+       let key = "testkey";
+
+       // Test the basic KVStore operations.
+       kv_store.write(namespace, sub_namespace, key, &data).unwrap();
+
+       // Test that an empty namespace/sub_namespace is allowed, but an empty namespace with a
+       // non-empty sub-namespace is not, and an empty key is not.
+       kv_store.write("", "", key, &data).unwrap();
+       let res = std::panic::catch_unwind(|| kv_store.write("", sub_namespace, key, &data));
+       assert!(res.is_err());
+       let res = std::panic::catch_unwind(|| kv_store.write(namespace, sub_namespace, "", &data));
+       assert!(res.is_err());
+
+       let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+       assert_eq!(listed_keys.len(), 1);
+       assert_eq!(listed_keys[0], key);
+
+       let read_data = kv_store.read(namespace, sub_namespace, key).unwrap();
+       assert_eq!(data, &*read_data);
+
+       kv_store.remove(namespace, sub_namespace, key, false).unwrap();
+
+       let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+       assert_eq!(listed_keys.len(), 0);
+
+       // Ensure we have no issue operating with a namespace/sub_namespace/key of length KVSTORE_NAMESPACE_KEY_MAX_LEN.
+       let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect();
+       kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap();
+
+       let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
+       assert_eq!(listed_keys.len(), 1);
+       assert_eq!(listed_keys[0], max_chars);
+
+       let read_data = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap();
+       assert_eq!(data, &*read_data);
+
+       kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap();
+
+       let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
+       assert_eq!(listed_keys.len(), 0);
+}
+
+// Integration-test the given KVStore implementation. Test relaying a few payments and check that
+// the persisted data is updated the appropriate number of times.
+pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, store_0, node_cfgs[0].keys_manager);
+       let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, store_1, node_cfgs[1].keys_manager);
+       node_cfgs[0].chain_monitor = chain_mon_0;
+       node_cfgs[1].chain_monitor = chain_mon_1;
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Check that the persisted channel data is empty before any channels are
+       // open.
+       let mut persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+       assert_eq!(persisted_chan_data_0.len(), 0);
+       let mut persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+       assert_eq!(persisted_chan_data_1.len(), 0);
+
+       // Helper to make sure the channel is on the expected update ID.
+       macro_rules! check_persisted_data {
+               ($expected_update_id: expr) => {
+                       persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+                       assert_eq!(persisted_chan_data_0.len(), 1);
+                       for (_, mon) in persisted_chan_data_0.iter() {
+                               assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+                       }
+                       persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+                       assert_eq!(persisted_chan_data_1.len(), 1);
+                       for (_, mon) in persisted_chan_data_1.iter() {
+                               assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+                       }
+               }
+       }
+
+       // Create some initial channel and check that a channel was persisted.
+       let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+       check_persisted_data!(0);
+
+       // Send a few payments and make sure the monitors are updated to the latest.
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       check_persisted_data!(5);
+       send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
+       check_persisted_data!(10);
+
+       // Force close because cooperative close doesn't result in any persisted
+       // updates.
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_broadcast!(nodes[0], true);
+       check_added_monitors!(nodes[0], 1);
+
+       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       assert_eq!(node_txn.len(), 1);
+
+       connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
+       check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+       check_added_monitors!(nodes[1], 1);
+
+       // Make sure everything is persisted as expected after close.
+       check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
+}
diff --git a/lightning-persister/src/util.rs b/lightning-persister/src/util.rs
deleted file mode 100644 (file)
index 20c4a81..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-#[cfg(target_os = "windows")]
-extern crate winapi;
-
-use std::fs;
-use std::path::PathBuf;
-use std::io::BufWriter;
-
-#[cfg(not(target_os = "windows"))]
-use std::os::unix::io::AsRawFd;
-
-use lightning::util::ser::Writeable;
-
-#[cfg(target_os = "windows")]
-use {
-       std::ffi::OsStr,
-       std::os::windows::ffi::OsStrExt
-};
-
-#[cfg(target_os = "windows")]
-macro_rules! call {
-       ($e: expr) => (
-               if $e != 0 {
-                       return Ok(())
-               } else {
-                       return Err(std::io::Error::last_os_error())
-               }
-       )
-}
-
-#[cfg(target_os = "windows")]
-fn path_to_windows_str<T: AsRef<OsStr>>(path: T) -> Vec<winapi::shared::ntdef::WCHAR> {
-       path.as_ref().encode_wide().chain(Some(0)).collect()
-}
-
-#[allow(bare_trait_objects)]
-pub(crate) fn write_to_file<W: Writeable>(dest_file: PathBuf, data: &W) -> std::io::Result<()> {
-       let mut tmp_file = dest_file.clone();
-       tmp_file.set_extension("tmp");
-
-       let parent_directory = dest_file.parent().unwrap();
-       fs::create_dir_all(parent_directory)?;
-       // Do a crazy dance with lots of fsync()s to be overly cautious here...
-       // We never want to end up in a state where we've lost the old data, or end up using the
-       // old data on power loss after we've returned.
-       // The way to atomically write a file on Unix platforms is:
-       // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
-       {
-               // Note that going by rust-lang/rust@d602a6b, on MacOS it is only safe to use
-               // rust stdlib 1.36 or higher.
-               let mut buf = BufWriter::new(fs::File::create(&tmp_file)?);
-               data.write(&mut buf)?;
-               buf.into_inner()?.sync_all()?;
-       }
-       // Fsync the parent directory on Unix.
-       #[cfg(not(target_os = "windows"))]
-       {
-               fs::rename(&tmp_file, &dest_file)?;
-               let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
-               unsafe { libc::fsync(dir_file.as_raw_fd()); }
-       }
-       #[cfg(target_os = "windows")]
-       {
-               if dest_file.exists() {
-                       unsafe {winapi::um::winbase::ReplaceFileW(
-                               path_to_windows_str(dest_file).as_ptr(), path_to_windows_str(tmp_file).as_ptr(), std::ptr::null(),
-                               winapi::um::winbase::REPLACEFILE_IGNORE_MERGE_ERRORS,
-                               std::ptr::null_mut() as *mut winapi::ctypes::c_void,
-                               std::ptr::null_mut() as *mut winapi::ctypes::c_void
-                       )};
-               } else {
-                       call!(unsafe {winapi::um::winbase::MoveFileExW(
-                               path_to_windows_str(tmp_file).as_ptr(), path_to_windows_str(dest_file).as_ptr(),
-                               winapi::um::winbase::MOVEFILE_WRITE_THROUGH | winapi::um::winbase::MOVEFILE_REPLACE_EXISTING
-                       )});
-               }
-       }
-       Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-       use lightning::util::ser::{Writer, Writeable};
-
-       use super::{write_to_file};
-       use std::fs;
-       use std::io;
-       use std::path::PathBuf;
-
-       struct TestWriteable{}
-       impl Writeable for TestWriteable {
-               fn write<W: Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> {
-                       writer.write_all(&[42; 1])
-               }
-       }
-
-       // Test that if the persister's path to channel data is read-only, writing
-       // data to it fails. Windows ignores the read-only flag for folders, so this
-       // test is Unix-only.
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_readonly_dir() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_readonly_dir_persister_filename".to_string();
-               let path = "test_readonly_dir_persister_dir";
-               fs::create_dir_all(path).unwrap();
-               let mut perms = fs::metadata(path).unwrap().permissions();
-               perms.set_readonly(true);
-               fs::set_permissions(path, perms).unwrap();
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => assert_eq!(e.kind(), io::ErrorKind::PermissionDenied),
-                       _ => panic!("Unexpected error message")
-               }
-       }
-
-       // Test failure to rename in the process of atomically creating a channel
-       // monitor's file. We induce this failure by making the `tmp` file a
-       // directory.
-       // Explanation: given "from" = the file being renamed, "to" = the destination
-       // file that already exists: Unix should fail because if "from" is a file,
-       // then "to" is also required to be a file.
-       // TODO: ideally try to make this work on Windows again
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_rename_failure() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_rename_failure_filename";
-               let path = "test_rename_failure_dir";
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               // Create the channel data file and make it a directory.
-               fs::create_dir_all(dest_file.clone()).unwrap();
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => assert_eq!(e.raw_os_error(), Some(libc::EISDIR)),
-                       _ => panic!("Unexpected Ok(())")
-               }
-               fs::remove_dir_all(path).unwrap();
-       }
-
-       #[test]
-       fn test_diskwriteable_failure() {
-               struct FailingWriteable {}
-               impl Writeable for FailingWriteable {
-                       fn write<W: Writer>(&self, _writer: &mut W) -> Result<(), std::io::Error> {
-                               Err(std::io::Error::new(std::io::ErrorKind::Other, "expected failure"))
-                       }
-               }
-
-               let filename = "test_diskwriteable_failure";
-               let path = "test_diskwriteable_failure_dir";
-               let test_writeable = FailingWriteable{};
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => {
-                               assert_eq!(e.kind(), std::io::ErrorKind::Other);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "expected failure");
-                       },
-                       _ => panic!("unexpected result")
-               }
-               fs::remove_dir_all(path).unwrap();
-       }
-
-       // Test failure to create the temporary file in the persistence process.
-       // We induce this failure by having the temp file already exist and be a
-       // directory.
-       #[test]
-       fn test_tmp_file_creation_failure() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_tmp_file_creation_failure_filename".to_string();
-               let path = "test_tmp_file_creation_failure_dir";
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               let mut tmp_file = dest_file.clone();
-               tmp_file.set_extension("tmp");
-               fs::create_dir_all(tmp_file).unwrap();
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => {
-                               #[cfg(not(target_os = "windows"))]
-                               assert_eq!(e.raw_os_error(), Some(libc::EISDIR));
-                               #[cfg(target_os = "windows")]
-                               assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
-                       }
-                       _ => panic!("Unexpected error message")
-               }
-       }
-}
diff --git a/lightning-persister/src/utils.rs b/lightning-persister/src/utils.rs
new file mode 100644 (file)
index 0000000..54ec230
--- /dev/null
@@ -0,0 +1,59 @@
+use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN};
+use lightning::util::string::PrintableString;
+
+
+pub(crate) fn is_valid_kvstore_str(key: &str) -> bool {
+       key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
+}
+
+pub(crate) fn check_namespace_key_validity(namespace: &str, sub_namespace: &str, key: Option<&str>, operation: &str) -> Result<(), std::io::Error> {
+       if let Some(key) = key {
+               if key.is_empty() {
+                       debug_assert!(false, "Failed to {} {}/{}/{}: key may not be empty.", operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       let msg = format!("Failed to {} {}/{}/{}: key may not be empty.", operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+
+               if namespace.is_empty() && !sub_namespace.is_empty() {
+                       debug_assert!(false,
+                               "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       let msg = format!(
+                               "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.", operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+
+               if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) || !is_valid_kvstore_str(key) {
+                       debug_assert!(false, "Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+                               operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       let msg = format!("Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+                               operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+       } else {
+               if namespace.is_empty() && !sub_namespace.is_empty() {
+                       debug_assert!(false,
+                               "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       let msg = format!(
+                               "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+               if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) {
+                       debug_assert!(false, "Failed to {} {}/{}: namespace and sub-namespace must be valid.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       let msg = format!("Failed to {} {}/{}: namespace and sub-namespace must be valid.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+       }
+
+       Ok(())
+}
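
As in `FilesystemStore::list` above, key-level operations pass `Some(key)` to `check_namespace_key_validity` while `list` passes `None`. Below is a brief sketch of how another crate-internal backend might reuse the helper; the in-memory map and the `read_checked` function are hypothetical:

        use std::collections::HashMap;

        // Hypothetical helper for an in-memory backend within this crate, reusing the
        // (crate-internal) validation above before touching the underlying map.
        pub(crate) fn read_checked(
                store: &HashMap<String, Vec<u8>>, namespace: &str, sub_namespace: &str, key: &str,
        ) -> std::io::Result<Vec<u8>> {
                // Key-level operations pass `Some(key)`; `list` would pass `None`.
                check_namespace_key_validity(namespace, sub_namespace, Some(key), "read")?;

                let full_key = format!("{}/{}/{}", namespace, sub_namespace, key);
                store.get(&full_key).cloned().ok_or_else(|| std::io::Error::new(
                        std::io::ErrorKind::NotFound, format!("Key not found: {}", full_key)))
        }
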
index 151f986553f86ec10d44fd6f196655aa711c8e1c..617b1213e89d368903b1903f151176bbc5b9e50e 100644 (file)
@@ -321,20 +321,3 @@ async fn test_esplora_syncs() {
                _ => panic!("Unexpected event"),
        }
 }
-
-#[tokio::test]
-#[cfg(any(feature = "esplora-async-https", feature = "esplora-blocking"))]
-async fn test_esplora_connects_to_public_server() {
-       let mut logger = TestLogger {};
-       let esplora_url = "https://blockstream.info/api".to_string();
-       let tx_sync = EsploraSyncClient::new(esplora_url, &mut logger);
-       let confirmable = TestConfirmable::new();
-
-       // Check we connect and pick up on new best blocks
-       assert_eq!(confirmable.best_block.lock().unwrap().1, 0);
-       #[cfg(feature = "esplora-async-https")]
-       tx_sync.sync(vec![&confirmable]).await.unwrap();
-       #[cfg(feature = "esplora-blocking")]
-       tx_sync.sync(vec![&confirmable]).unwrap();
-       assert_ne!(confirmable.best_block.lock().unwrap().1, 0);
-}
index 89087a10cd414ac9fdeae4331490ef97e73666ea..89bf7ce5d2f405c4ab7dd87e9ae5347875ad99a1 100644 (file)
@@ -15,8 +15,9 @@ pub(crate) mod utils;
 
 use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
 
-use crate::sign::EntropySource;
 use crate::ln::msgs::DecodeError;
+use crate::offers::invoice::BlindedPayInfo;
+use crate::sign::EntropySource;
 use crate::util::ser::{Readable, Writeable, Writer};
 
 use crate::io;
@@ -75,25 +76,44 @@ impl BlindedPath {
                })
        }
 
-       /// Create a blinded path for a payment, to be forwarded along `path`. The last node
-       /// in `path` will be the destination node.
+       /// Create a one-hop blinded path for a payment.
+       pub fn one_hop_for_payment<ES: EntropySource, T: secp256k1::Signing + secp256k1::Verification>(
+               payee_node_id: PublicKey, payee_tlvs: payment::ReceiveTlvs, entropy_source: &ES,
+               secp_ctx: &Secp256k1<T>
+       ) -> Result<(BlindedPayInfo, Self), ()> {
+               // This value is not considered in pathfinding for 1-hop blinded paths, because it's intended to
+               // be in relation to a specific channel.
+               let htlc_maximum_msat = u64::max_value();
+               Self::new_for_payment(
+                       &[], payee_node_id, payee_tlvs, htlc_maximum_msat, entropy_source, secp_ctx
+               )
+       }
+
+       /// Create a blinded path for a payment, to be forwarded along `intermediate_nodes`.
        ///
-       /// Errors if `path` is empty or a node id in `path` is invalid.
+       /// Errors if:
+       /// * a provided node id is invalid
+       /// * [`BlindedPayInfo`] calculation results in an integer overflow
+       /// * any unknown features are required in the provided [`ForwardTlvs`]
+       ///
+       /// [`ForwardTlvs`]: crate::blinded_path::payment::ForwardTlvs
        //  TODO: make all payloads the same size with padding + add dummy hops
-       pub fn new_for_payment<ES: EntropySource, T: secp256k1::Signing + secp256k1::Verification>(
-               intermediate_nodes: &[(PublicKey, payment::ForwardTlvs)], payee_node_id: PublicKey,
-               payee_tlvs: payment::ReceiveTlvs, entropy_source: &ES, secp_ctx: &Secp256k1<T>
-       ) -> Result<Self, ()> {
+       pub(crate) fn new_for_payment<ES: EntropySource, T: secp256k1::Signing + secp256k1::Verification>(
+               intermediate_nodes: &[payment::ForwardNode], payee_node_id: PublicKey,
+               payee_tlvs: payment::ReceiveTlvs, htlc_maximum_msat: u64, entropy_source: &ES,
+               secp_ctx: &Secp256k1<T>
+       ) -> Result<(BlindedPayInfo, Self), ()> {
                let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
                let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
 
-               Ok(BlindedPath {
-                       introduction_node_id: intermediate_nodes.first().map_or(payee_node_id, |n| n.0),
+               let blinded_payinfo = payment::compute_payinfo(intermediate_nodes, &payee_tlvs, htlc_maximum_msat)?;
+               Ok((blinded_payinfo, BlindedPath {
+                       introduction_node_id: intermediate_nodes.first().map_or(payee_node_id, |n| n.node_id),
                        blinding_point: PublicKey::from_secret_key(secp_ctx, &blinding_secret),
                        blinded_hops: payment::blinded_hops(
                                secp_ctx, intermediate_nodes, payee_node_id, payee_tlvs, &blinding_secret
                        ).map_err(|_| ())?,
-               })
+               }))
        }
 }
 
index 0f6cf01858dc5666411ca6f1d6fe10cfc3f9a323..39f16a91692cb30fb1be087191b4c32a506de830 100644 (file)
@@ -10,31 +10,48 @@ use crate::io;
 use crate::ln::PaymentSecret;
 use crate::ln::features::BlindedHopFeatures;
 use crate::ln::msgs::DecodeError;
+use crate::offers::invoice::BlindedPayInfo;
 use crate::prelude::*;
 use crate::util::ser::{Readable, Writeable, Writer};
 
+use core::convert::TryFrom;
+
+/// An intermediate node, its outbound channel, and relay parameters.
+#[derive(Clone, Debug)]
+pub struct ForwardNode {
+       /// The TLVs for this node's [`BlindedHop`], where the fee parameters contained within are also
+       /// used for [`BlindedPayInfo`] construction.
+       pub tlvs: ForwardTlvs,
+       /// This node's pubkey.
+       pub node_id: PublicKey,
+       /// The maximum value, in msat, that may be accepted by this node.
+       pub htlc_maximum_msat: u64,
+}
+
 /// Data to construct a [`BlindedHop`] for forwarding a payment.
+#[derive(Clone, Debug)]
 pub struct ForwardTlvs {
        /// The short channel id this payment should be forwarded out over.
-       short_channel_id: u64,
+       pub short_channel_id: u64,
        /// Payment parameters for relaying over [`Self::short_channel_id`].
-       payment_relay: PaymentRelay,
+       pub payment_relay: PaymentRelay,
        /// Payment constraints for relaying over [`Self::short_channel_id`].
-       payment_constraints: PaymentConstraints,
+       pub payment_constraints: PaymentConstraints,
        /// Supported and required features when relaying a payment onion containing this object's
        /// corresponding [`BlindedHop::encrypted_payload`].
        ///
        /// [`BlindedHop::encrypted_payload`]: crate::blinded_path::BlindedHop::encrypted_payload
-       features: BlindedHopFeatures,
+       pub features: BlindedHopFeatures,
 }
 
 /// Data to construct a [`BlindedHop`] for receiving a payment. This payload is custom to LDK and
 /// may not be valid if received by another lightning implementation.
+#[derive(Clone, Debug)]
 pub struct ReceiveTlvs {
        /// Used to authenticate the sender of a payment to the receiver and tie MPP HTLCs together.
-       payment_secret: PaymentSecret,
+       pub payment_secret: PaymentSecret,
        /// Constraints for the receiver of this payment.
-       payment_constraints: PaymentConstraints,
+       pub payment_constraints: PaymentConstraints,
 }
 
 /// Data to construct a [`BlindedHop`] for sending a payment over.
@@ -56,46 +73,66 @@ enum BlindedPaymentTlvsRef<'a> {
 /// Parameters for relaying over a given [`BlindedHop`].
 ///
 /// [`BlindedHop`]: crate::blinded_path::BlindedHop
+#[derive(Clone, Debug)]
 pub struct PaymentRelay {
        /// Number of blocks subtracted from an incoming HTLC's `cltv_expiry` for this [`BlindedHop`].
-       ///
-       ///[`BlindedHop`]: crate::blinded_path::BlindedHop
        pub cltv_expiry_delta: u16,
        /// Liquidity fee charged (in millionths of the amount transferred) for relaying a payment over
        /// this [`BlindedHop`], (i.e., 10,000 is 1%).
-       ///
-       ///[`BlindedHop`]: crate::blinded_path::BlindedHop
        pub fee_proportional_millionths: u32,
        /// Base fee charged (in millisatoshi) for relaying a payment over this [`BlindedHop`].
-       ///
-       ///[`BlindedHop`]: crate::blinded_path::BlindedHop
        pub fee_base_msat: u32,
 }
 
 /// Constraints for relaying over a given [`BlindedHop`].
 ///
 /// [`BlindedHop`]: crate::blinded_path::BlindedHop
+#[derive(Clone, Debug)]
 pub struct PaymentConstraints {
        /// The maximum total CLTV delta that is acceptable when relaying a payment over this
        /// [`BlindedHop`].
-       ///
-       ///[`BlindedHop`]: crate::blinded_path::BlindedHop
        pub max_cltv_expiry: u32,
-       /// The minimum value, in msat, that may be relayed over this [`BlindedHop`].
+       /// The minimum value, in msat, that may be accepted by the node corresponding to this
+       /// [`BlindedHop`].
        pub htlc_minimum_msat: u64,
 }
 
-impl_writeable_tlv_based!(ForwardTlvs, {
-       (2, short_channel_id, required),
-       (10, payment_relay, required),
-       (12, payment_constraints, required),
-       (14, features, required),
-});
+impl Writeable for ForwardTlvs {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               encode_tlv_stream!(w, {
+                       (2, self.short_channel_id, required),
+                       (10, self.payment_relay, required),
+                       (12, self.payment_constraints, required),
+                       (14, self.features, required)
+               });
+               Ok(())
+       }
+}
+
+impl Writeable for ReceiveTlvs {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               encode_tlv_stream!(w, {
+                       (12, self.payment_constraints, required),
+                       (65536, self.payment_secret, required)
+               });
+               Ok(())
+       }
+}
 
-impl_writeable_tlv_based!(ReceiveTlvs, {
-       (12, payment_constraints, required),
-       (65536, payment_secret, required),
-});
+// This will be removed once we support forwarding blinded HTLCs, because we'll always read a
+// `BlindedPaymentTlvs` instead.
+impl Readable for ReceiveTlvs {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               _init_and_read_tlv_stream!(r, {
+                       (12, payment_constraints, required),
+                       (65536, payment_secret, required),
+               });
+               Ok(Self {
+                       payment_secret: payment_secret.0.unwrap(),
+                       payment_constraints: payment_constraints.0.unwrap()
+               })
+       }
+}
 
 impl<'a> Writeable for BlindedPaymentTlvsRef<'a> {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
@@ -140,16 +177,98 @@ impl Readable for BlindedPaymentTlvs {
 
 /// Construct blinded payment hops for the given `intermediate_nodes` and payee info.
 pub(super) fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>(
-       secp_ctx: &Secp256k1<T>, intermediate_nodes: &[(PublicKey, ForwardTlvs)],
+       secp_ctx: &Secp256k1<T>, intermediate_nodes: &[ForwardNode],
        payee_node_id: PublicKey, payee_tlvs: ReceiveTlvs, session_priv: &SecretKey
 ) -> Result<Vec<BlindedHop>, secp256k1::Error> {
-       let pks = intermediate_nodes.iter().map(|(pk, _)| pk)
+       let pks = intermediate_nodes.iter().map(|node| &node.node_id)
                .chain(core::iter::once(&payee_node_id));
-       let tlvs = intermediate_nodes.iter().map(|(_, tlvs)| BlindedPaymentTlvsRef::Forward(tlvs))
+       let tlvs = intermediate_nodes.iter().map(|node| BlindedPaymentTlvsRef::Forward(&node.tlvs))
                .chain(core::iter::once(BlindedPaymentTlvsRef::Receive(&payee_tlvs)));
        utils::construct_blinded_hops(secp_ctx, pks, tlvs, session_priv)
 }
 
+/// Returns `None` if underflow occurs.
+fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
+       let inbound_amt = inbound_amt_msat as u128;
+       let base = payment_relay.fee_base_msat as u128;
+       let prop = payment_relay.fee_proportional_millionths as u128;
+
+       let post_base_fee_inbound_amt =
+               if let Some(amt) = inbound_amt.checked_sub(base) { amt } else { return None };
+       let mut amt_to_forward =
+               (post_base_fee_inbound_amt * 1_000_000 + 1_000_000 + prop - 1) / (prop + 1_000_000);
+
+       let fee = ((amt_to_forward * prop) / 1_000_000) + base;
+       if inbound_amt - fee < amt_to_forward {
+               // Rounding up the forwarded amount resulted in underpaying this node, so take an extra 1 msat
+               // in fee to compensate.
+               amt_to_forward -= 1;
+       }
+       debug_assert_eq!(amt_to_forward + fee, inbound_amt);
+       u64::try_from(amt_to_forward).ok()
+}
+
+pub(super) fn compute_payinfo(
+       intermediate_nodes: &[ForwardNode], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64
+) -> Result<BlindedPayInfo, ()> {
+       let mut curr_base_fee: u64 = 0;
+       let mut curr_prop_mil: u64 = 0;
+       let mut cltv_expiry_delta: u16 = 0;
+       for tlvs in intermediate_nodes.iter().rev().map(|n| &n.tlvs) {
+               // In the future, we'll want to take the intersection of all supported features for the
+               // `BlindedPayInfo`, but there are no features in that context right now.
+               if tlvs.features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()) }
+
+               let next_base_fee = tlvs.payment_relay.fee_base_msat as u64;
+               let next_prop_mil = tlvs.payment_relay.fee_proportional_millionths as u64;
+               // Use integer arithmetic to compute `ceil(a/b)` as `(a+b-1)/b`
+               // ((curr_base_fee * (1_000_000 + next_prop_mil)) / 1_000_000) + next_base_fee
+               curr_base_fee = curr_base_fee.checked_mul(1_000_000 + next_prop_mil)
+                       .and_then(|f| f.checked_add(1_000_000 - 1))
+                       .map(|f| f / 1_000_000)
+                       .and_then(|f| f.checked_add(next_base_fee))
+                       .ok_or(())?;
+               // ceil(((curr_prop_mil + 1_000_000) * (next_prop_mil + 1_000_000)) / 1_000_000) - 1_000_000
+               curr_prop_mil = curr_prop_mil.checked_add(1_000_000)
+                       .and_then(|f1| next_prop_mil.checked_add(1_000_000).and_then(|f2| f2.checked_mul(f1)))
+                       .and_then(|f| f.checked_add(1_000_000 - 1))
+                       .map(|f| f / 1_000_000)
+                       .and_then(|f| f.checked_sub(1_000_000))
+                       .ok_or(())?;
+
+               cltv_expiry_delta = cltv_expiry_delta.checked_add(tlvs.payment_relay.cltv_expiry_delta).ok_or(())?;
+       }
+
+       let mut htlc_minimum_msat: u64 = 1;
+       let mut htlc_maximum_msat: u64 = 21_000_000 * 100_000_000 * 1_000; // Total bitcoin supply
+       for node in intermediate_nodes.iter() {
+               // The min htlc for an intermediate node is that node's min minus the fees charged by all of the
+               // following hops for forwarding that min, since that fee amount will automatically be included
+               // in the amount that this node receives and contribute towards reaching its min.
+               htlc_minimum_msat = amt_to_forward_msat(
+                       core::cmp::max(node.tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat),
+                       &node.tlvs.payment_relay
+               ).unwrap_or(1); // If underflow occurs, we definitely reached this node's min
+               htlc_maximum_msat = amt_to_forward_msat(
+                       core::cmp::min(node.htlc_maximum_msat, htlc_maximum_msat), &node.tlvs.payment_relay
+               ).ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max
+       }
+       htlc_minimum_msat = core::cmp::max(
+               payee_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat
+       );
+       htlc_maximum_msat = core::cmp::min(payee_htlc_maximum_msat, htlc_maximum_msat);
+
+       if htlc_maximum_msat < htlc_minimum_msat { return Err(()) }
+       Ok(BlindedPayInfo {
+               fee_base_msat: u32::try_from(curr_base_fee).map_err(|_| ())?,
+               fee_proportional_millionths: u32::try_from(curr_prop_mil).map_err(|_| ())?,
+               cltv_expiry_delta,
+               htlc_minimum_msat,
+               htlc_maximum_msat,
+               features: BlindedHopFeatures::empty(),
+       })
+}
+
 impl_writeable_msg!(PaymentRelay, {
        cltv_expiry_delta,
        fee_proportional_millionths,
@@ -160,3 +279,236 @@ impl_writeable_msg!(PaymentConstraints, {
        max_cltv_expiry,
        htlc_minimum_msat
 }, {});
+
+#[cfg(test)]
+mod tests {
+       use bitcoin::secp256k1::PublicKey;
+       use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, ReceiveTlvs, PaymentConstraints, PaymentRelay};
+       use crate::ln::PaymentSecret;
+       use crate::ln::features::BlindedHopFeatures;
+
+       #[test]
+       fn compute_payinfo() {
+               // Taken from the spec example for aggregating blinded payment info. See
+               // https://github.com/lightning/bolts/blob/master/proposals/route-blinding.md#blinded-payments
+               let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+               let intermediate_nodes = vec![ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 144,
+                                       fee_proportional_millionths: 500,
+                                       fee_base_msat: 100,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 100,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: u64::max_value(),
+               }, ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 144,
+                                       fee_proportional_millionths: 500,
+                                       fee_base_msat: 100,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 1_000,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: u64::max_value(),
+               }];
+               let recv_tlvs = ReceiveTlvs {
+                       payment_secret: PaymentSecret([0; 32]),
+                       payment_constraints: PaymentConstraints {
+                               max_cltv_expiry: 0,
+                               htlc_minimum_msat: 1,
+                       },
+               };
+               let htlc_maximum_msat = 100_000;
+               let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+               assert_eq!(blinded_payinfo.fee_base_msat, 201);
+               assert_eq!(blinded_payinfo.fee_proportional_millionths, 1001);
+               assert_eq!(blinded_payinfo.cltv_expiry_delta, 288);
+               assert_eq!(blinded_payinfo.htlc_minimum_msat, 900);
+               assert_eq!(blinded_payinfo.htlc_maximum_msat, htlc_maximum_msat);
+       }
+
+       #[test]
+       fn compute_payinfo_1_hop() {
+               let recv_tlvs = ReceiveTlvs {
+                       payment_secret: PaymentSecret([0; 32]),
+                       payment_constraints: PaymentConstraints {
+                               max_cltv_expiry: 0,
+                               htlc_minimum_msat: 1,
+                       },
+               };
+               let blinded_payinfo = super::compute_payinfo(&[], &recv_tlvs, 4242).unwrap();
+               assert_eq!(blinded_payinfo.fee_base_msat, 0);
+               assert_eq!(blinded_payinfo.fee_proportional_millionths, 0);
+               assert_eq!(blinded_payinfo.cltv_expiry_delta, 0);
+               assert_eq!(blinded_payinfo.htlc_minimum_msat, 1);
+               assert_eq!(blinded_payinfo.htlc_maximum_msat, 4242);
+       }
+
+       #[test]
+       fn simple_aggregated_htlc_min() {
+               // If no hops charge fees, the htlc_minimum_msat should just be the maximum htlc_minimum_msat
+               // along the path.
+               let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+               let intermediate_nodes = vec![ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 0,
+                                       fee_proportional_millionths: 0,
+                                       fee_base_msat: 0,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 1,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: u64::max_value()
+               }, ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 0,
+                                       fee_proportional_millionths: 0,
+                                       fee_base_msat: 0,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 2_000,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: u64::max_value()
+               }];
+               let recv_tlvs = ReceiveTlvs {
+                       payment_secret: PaymentSecret([0; 32]),
+                       payment_constraints: PaymentConstraints {
+                               max_cltv_expiry: 0,
+                               htlc_minimum_msat: 3,
+                       },
+               };
+               let htlc_maximum_msat = 100_000;
+               let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+               assert_eq!(blinded_payinfo.htlc_minimum_msat, 2_000);
+       }
+
+       #[test]
+       fn aggregated_htlc_min() {
+               // Create a path with varying fees and htlc_mins, and make sure htlc_minimum_msat ends up as the
+               // max (htlc_min - following_fees) along the path.
+               let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+               let intermediate_nodes = vec![ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 0,
+                                       fee_proportional_millionths: 500,
+                                       fee_base_msat: 1_000,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 5_000,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: u64::max_value()
+               }, ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 0,
+                                       fee_proportional_millionths: 500,
+                                       fee_base_msat: 200,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 2_000,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: u64::max_value()
+               }];
+               let recv_tlvs = ReceiveTlvs {
+                       payment_secret: PaymentSecret([0; 32]),
+                       payment_constraints: PaymentConstraints {
+                               max_cltv_expiry: 0,
+                               htlc_minimum_msat: 1,
+                       },
+               };
+               let htlc_minimum_msat = 3798;
+               assert!(super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_minimum_msat - 1).is_err());
+
+               let htlc_maximum_msat = htlc_minimum_msat + 1;
+               let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+               assert_eq!(blinded_payinfo.htlc_minimum_msat, htlc_minimum_msat);
+               assert_eq!(blinded_payinfo.htlc_maximum_msat, htlc_maximum_msat);
+       }
+
+       #[test]
+       fn aggregated_htlc_max() {
+               // Create a path with varying fees and `htlc_maximum_msat`s, and make sure the aggregated max
+               // htlc ends up as the min (htlc_max - following_fees) along the path.
+               let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+               let intermediate_nodes = vec![ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 0,
+                                       fee_proportional_millionths: 500,
+                                       fee_base_msat: 1_000,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 1,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: 5_000,
+               }, ForwardNode {
+                       node_id: dummy_pk,
+                       tlvs: ForwardTlvs {
+                               short_channel_id: 0,
+                               payment_relay: PaymentRelay {
+                                       cltv_expiry_delta: 0,
+                                       fee_proportional_millionths: 500,
+                                       fee_base_msat: 1,
+                               },
+                               payment_constraints: PaymentConstraints {
+                                       max_cltv_expiry: 0,
+                                       htlc_minimum_msat: 1,
+                               },
+                               features: BlindedHopFeatures::empty(),
+                       },
+                       htlc_maximum_msat: 10_000
+               }];
+               let recv_tlvs = ReceiveTlvs {
+                       payment_secret: PaymentSecret([0; 32]),
+                       payment_constraints: PaymentConstraints {
+                               max_cltv_expiry: 0,
+                               htlc_minimum_msat: 1,
+                       },
+               };
+
+               let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, 10_000).unwrap();
+               assert_eq!(blinded_payinfo.htlc_maximum_msat, 3997);
+       }
+}
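For clarity, the `fee_base_msat = 201` and `fee_proportional_millionths = 1001` values asserted in the spec-example test above follow from folding the two identical hops (base 100, proportional 500) back-to-front with the same ceiling arithmetic `compute_payinfo` uses. A standalone re-derivation of that calculation (editorial, not part of the patch):

// Worked re-derivation of the aggregated fees in the spec-example test above.
fn aggregate_two_identical_hops() {
	let (base, prop) = (100u64, 500u64);
	let ceil_div = |a: u64, b: u64| (a + b - 1) / b;

	// After folding in the hop closest to the payee, the aggregate is just that
	// hop's own fees.
	let (mut agg_base, mut agg_prop) = (base, prop);

	// Folding in the hop closer to the introduction point:
	//   base' = ceil(agg_base * (1_000_000 + prop) / 1_000_000) + base
	//   prop' = ceil((agg_prop + 1_000_000) * (prop + 1_000_000) / 1_000_000) - 1_000_000
	agg_base = ceil_div(agg_base * (1_000_000 + prop), 1_000_000) + base;
	agg_prop = ceil_div((agg_prop + 1_000_000) * (prop + 1_000_000), 1_000_000) - 1_000_000;

	assert_eq!(agg_base, 201);   // fee_base_msat in the test
	assert_eq!(agg_prop, 1_001); // fee_proportional_millionths in the test
}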
index 7d698a220676b8dfd7c9a0df41b0b09f0799e2e9..b3b62a2e8706f96a1ddf82146b7260e63bc91796 100644 (file)
@@ -42,6 +42,7 @@ use crate::ln::channelmanager::ChannelDetails;
 
 use crate::prelude::*;
 use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
+use core::iter::FromIterator;
 use core::ops::Deref;
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use bitcoin::secp256k1::PublicKey;
@@ -285,7 +286,22 @@ where C::Target: chain::Filter,
        where
                FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
        {
+               let funding_outpoints: HashSet<OutPoint> = HashSet::from_iter(self.monitors.read().unwrap().keys().cloned());
+               for funding_outpoint in funding_outpoints.iter() {
+                       let monitor_lock = self.monitors.read().unwrap();
+                       if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
+                               self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state);
+                       }
+               }
+
+               // Do some follow-up cleanup in case any funding outpoints were added between iterations.
                let monitor_states = self.monitors.write().unwrap();
+               for (funding_outpoint, monitor_state) in monitor_states.iter() {
+                       if !funding_outpoints.contains(funding_outpoint) {
+                               self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state);
+                       }
+               }
+
                if let Some(height) = best_height {
                        // If the best block height is being updated, update highest_chain_height under the
                        // monitors write lock.
@@ -295,55 +311,55 @@ where C::Target: chain::Filter,
                                self.highest_chain_height.store(new_height, Ordering::Release);
                        }
                }
+       }
 
-               for (funding_outpoint, monitor_state) in monitor_states.iter() {
-                       let monitor = &monitor_state.monitor;
-                       let mut txn_outputs;
-                       {
-                               txn_outputs = process(monitor, txdata);
-                               let update_id = MonitorUpdateId {
-                                       contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
-                               };
-                               let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
-                               if let Some(height) = best_height {
-                                       if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
-                                               // If there are not ChainSync persists awaiting completion, go ahead and
-                                               // set last_chain_persist_height here - we wouldn't want the first
-                                               // InProgress to always immediately be considered "overly delayed".
-                                               monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
-                                       }
+       fn update_monitor_with_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>) where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
+               let monitor = &monitor_state.monitor;
+               let mut txn_outputs;
+               {
+                       txn_outputs = process(monitor, txdata);
+                       let update_id = MonitorUpdateId {
+                               contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
+                       };
+                       let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
+                       if let Some(height) = best_height {
+                               if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
+                                       // If there are not ChainSync persists awaiting completion, go ahead and
+                                       // set last_chain_persist_height here - we wouldn't want the first
+                                       // InProgress to always immediately be considered "overly delayed".
+                                       monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
                                }
+                       }
 
-                               log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
-                               match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
-                                       ChannelMonitorUpdateStatus::Completed =>
-                                               log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
-                                       ChannelMonitorUpdateStatus::PermanentFailure => {
-                                               monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                               self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
-                                               self.event_notifier.notify();
-                                       },
-                                       ChannelMonitorUpdateStatus::InProgress => {
-                                               log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
-                                               pending_monitor_updates.push(update_id);
-                                       },
+                       log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+                       match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
+                               ChannelMonitorUpdateStatus::Completed =>
+                                       log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+                               ChannelMonitorUpdateStatus::PermanentFailure => {
+                                       monitor_state.channel_perm_failed.store(true, Ordering::Release);
+                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
+                                       self.event_notifier.notify();
+                               }
+                               ChannelMonitorUpdateStatus::InProgress => {
+                                       log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+                                       pending_monitor_updates.push(update_id);
                                }
                        }
+               }
 
-                       // Register any new outputs with the chain source for filtering, storing any dependent
-                       // transactions from within the block that previously had not been included in txdata.
-                       if let Some(ref chain_source) = self.chain_source {
-                               let block_hash = header.block_hash();
-                               for (txid, mut outputs) in txn_outputs.drain(..) {
-                                       for (idx, output) in outputs.drain(..) {
-                                               // Register any new outputs with the chain source for filtering
-                                               let output = WatchedOutput {
-                                                       block_hash: Some(block_hash),
-                                                       outpoint: OutPoint { txid, index: idx as u16 },
-                                                       script_pubkey: output.script_pubkey,
-                                               };
-                                               chain_source.register_output(output)
-                                       }
+               // Register any new outputs with the chain source for filtering, storing any dependent
+               // transactions from within the block that previously had not been included in txdata.
+               if let Some(ref chain_source) = self.chain_source {
+                       let block_hash = header.block_hash();
+                       for (txid, mut outputs) in txn_outputs.drain(..) {
+                               for (idx, output) in outputs.drain(..) {
+                                       // Register any new outputs with the chain source for filtering
+                                       let output = WatchedOutput {
+                                               block_hash: Some(block_hash),
+                                               outpoint: OutPoint { txid, index: idx as u16 },
+                                               script_pubkey: output.script_pubkey,
+                                       };
+                                       chain_source.register_output(output)
                                }
                        }
                }
@@ -756,7 +772,7 @@ where C::Target: chain::Filter,
                                        monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
                                                > self.highest_chain_height.load(Ordering::Acquire)
                        {
-                               log_info!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
+                               log_debug!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
                        } else {
                                if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
                                        // If an `UpdateOrigin::ChainSync` persistence failed with `PermanentFailure`,
@@ -838,8 +854,8 @@ mod tests {
                create_announced_chan_between_nodes(&nodes, 0, 1);
 
                // Route two payments to be claimed at the same time.
-               let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-               let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+               let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+               let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
                chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
                chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -946,7 +962,7 @@ mod tests {
                let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
                // First route a payment that we will claim on chain and give the recipient the preimage.
-               let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+               let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
                nodes[1].node.claim_funds(payment_preimage);
                expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
                nodes[1].node.get_and_clear_pending_msg_events();
@@ -976,7 +992,7 @@ mod tests {
                        assert!(err.contains("ChannelMonitor storage failure")));
                check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
                check_closed_broadcast!(nodes[0], true);
-               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }, 
+               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
                        [nodes[1].node.get_our_node_id()], 100000);
 
                // However, as the ChainMonitor is still waiting for the original persistence to complete,
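The chain-sync restructuring above swaps a single pass under the monitors lock for a snapshot-then-sweep pattern: each monitor is synced while holding only a short-lived read lock, and a second pass under the write lock catches outpoints added mid-iteration. A generic sketch of that pattern using plain `std` types (not LDK's internal ones):

// Editorial sketch of the snapshot-then-sweep locking pattern.
use std::collections::{HashMap, HashSet};
use std::hash::Hash;
use std::sync::RwLock;

fn process_all<K: Hash + Eq + Clone, V>(
	map: &RwLock<HashMap<K, V>>, mut work: impl FnMut(&K, &V),
) {
	// 1) Snapshot the keys so no lock is held across the per-entry work.
	let snapshot: HashSet<K> = map.read().unwrap().keys().cloned().collect();
	for key in snapshot.iter() {
		let guard = map.read().unwrap();
		if let Some(value) = guard.get(key) {
			work(key, value);
		}
	}
	// 2) Sweep up entries inserted while the snapshot was being processed.
	let guard = map.write().unwrap();
	for (key, value) in guard.iter() {
		if !snapshot.contains(key) {
			work(key, value);
		}
	}
}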
index da3970da8eb887f6e33d3e9981eacdb77609e8f9..8c92ab47cc00b6611075e7a65d72cf4653365664 100644 (file)
@@ -67,7 +67,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
        pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
@@ -487,7 +487,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
 
 );
 
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
        LatestHolderCommitmentTXInfo {
                commitment_tx: HolderCommitmentTransaction,
@@ -2642,7 +2642,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                }
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
-                                               log_error!(logger, "    in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id()));
+                                               log_error!(logger, "    in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
                                                log_error!(logger, "    Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
                                        } else {
                                                // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager
@@ -3389,7 +3389,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
                                        let mut balance_spendable_csv = None;
                                        log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
-                                               log_bytes!(self.funding_info.0.to_channel_id()), txid);
+                                               &self.funding_info.0.to_channel_id(), txid);
                                        self.funding_spend_seen = true;
                                        let mut commitment_tx_to_counterparty_output = None;
                                        if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
@@ -3491,7 +3491,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
                        claimable_outpoints.push(commitment_package);
                        self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
-                       let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
+                       // Although we aren't signing the transaction directly here, the transaction will be signed
+                       // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
+                       // new channel updates.
                        self.holder_tx_signed = true;
                        // We can't broadcast our HTLC transactions while the commitment transaction is
                        // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
@@ -3501,7 +3503,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                // assuming it gets confirmed in the next block. Sadly, we have code which considers
                                // "not yet confirmed" things as discardable, so we cannot do that here.
                                let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
-                               let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
+                               let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
+                               let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
                                if !new_outputs.is_empty() {
                                        watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
                                }
index 35b7c86e7000912edbd115c01caa555a0fe03a99..1d874d43e4fdedb10baf8578ea6591f6ae5148aa 100644 (file)
@@ -1123,6 +1123,10 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                ret
        }
 
+       pub(crate) fn get_unsigned_holder_commitment_tx(&self) -> Transaction {
+               self.holder_commitment.trust().built_transaction().transaction.clone()
+       }
+
        //TODO: getting latest holder transactions should be infallible and result in us "force-closing the channel", but we may
        // have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after OutboundV1Channel::get_funding_created,
        // before providing an initial commitment transaction. For outbound channel, init ChannelMonitor at Channel::funding_signed, there is nothing
index ce449a4102c31498232874498c3e6cced2da8c20..22e7ec2c2f9eb90fcfaa8f5b754c27540aa854e8 100644 (file)
@@ -9,7 +9,9 @@
 
 //! Types describing on-chain transactions.
 
+use crate::ln::ChannelId;
 use bitcoin::hash_types::Txid;
+use bitcoin::hashes::Hash;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::transaction::Transaction;
 
@@ -57,12 +59,8 @@ pub struct OutPoint {
 
 impl OutPoint {
        /// Convert an `OutPoint` to a lightning channel id.
-       pub fn to_channel_id(&self) -> [u8; 32] {
-               let mut res = [0; 32];
-               res[..].copy_from_slice(&self.txid[..]);
-               res[30] ^= ((self.index >> 8) & 0xff) as u8;
-               res[31] ^= ((self.index >> 0) & 0xff) as u8;
-               res
+       pub fn to_channel_id(&self) -> ChannelId {
+               ChannelId::v1_from_funding_txid(&self.txid.as_inner(), self.index)
        }
 
        /// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
@@ -94,10 +92,10 @@ mod tests {
                assert_eq!(&OutPoint {
                        txid: tx.txid(),
                        index: 0
-               }.to_channel_id(), &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
+               }.to_channel_id().0[..], &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
                assert_eq!(&OutPoint {
                        txid: tx.txid(),
                        index: 1
-               }.to_channel_id(), &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
+               }.to_channel_id().0[..], &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
        }
 }
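The removed `to_channel_id` body above shows how a v1 channel id is derived: the funding txid's bytes with the last two bytes XORed against the big-endian funding output index, which is presumably what the new `ChannelId::v1_from_funding_txid` helper encapsulates. A standalone restatement of that derivation:

// Derive the v1 channel id bytes exactly as the removed code did: txid bytes
// with bytes 30 and 31 XORed against the big-endian funding output index.
fn v1_channel_id_bytes(funding_txid: &[u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut res = *funding_txid;
	res[30] ^= (funding_output_index >> 8) as u8;
	res[31] ^= (funding_output_index & 0xff) as u8;
	res
}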
index 4e7c3a224c29204d57d43220527ea9ba423b9620..2d44275abb5aa2288009c681e26df14e959a2ce1 100644 (file)
@@ -57,6 +57,12 @@ pub struct ChannelDerivationParameters {
        pub transaction_parameters: ChannelTransactionParameters,
 }
 
+impl_writeable_tlv_based!(ChannelDerivationParameters, {
+    (0, value_satoshis, required),
+    (2, keys_id, required),
+    (4, transaction_parameters, required),
+});
+
 /// A descriptor used to sign for a commitment transaction's anchor output.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AnchorDescriptor {
@@ -139,6 +145,16 @@ pub struct HTLCDescriptor {
        pub counterparty_sig: Signature
 }
 
+impl_writeable_tlv_based!(HTLCDescriptor, {
+    (0, channel_derivation_parameters, required),
+    (2, commitment_txid, required),
+    (4, per_commitment_number, required),
+    (6, per_commitment_point, required),
+    (8, htlc, required),
+    (10, preimage, option),
+    (12, counterparty_sig, required),
+});
+
 impl HTLCDescriptor {
        /// Returns the outpoint of the HTLC output in the commitment transaction. This is the outpoint
        /// being spent by the HTLC input in the HTLC transaction.
index 36c0f20d2aed5d99f8183240d3332d67e0cd0130..bb98e271597309d057ca4712b394e302b35cddc3 100644 (file)
@@ -23,7 +23,7 @@ use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields};
 use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
 use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::msgs;
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::routing::gossip::NetworkUpdate;
 use crate::util::errors::APIError;
 use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
@@ -83,7 +83,7 @@ impl_writeable_tlv_based_enum!(PaymentPurpose,
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClaimedHTLC {
        /// The `channel_id` of the channel over which the HTLC was received.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The `user_channel_id` of the channel over which the HTLC was received. This is the value
        /// passed in to [`ChannelManager::create_channel`] for outbound channels, or to
        /// [`ChannelManager::accept_inbound_channel`] for inbound channels if
@@ -246,7 +246,7 @@ pub enum HTLCDestination {
                /// counterparty node information.
                node_id: Option<PublicKey>,
                /// The outgoing `channel_id` between us and the next node.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
        },
        /// Scenario where we are unsure of the next node to forward the HTLC to.
        UnknownNextHop {
@@ -364,7 +364,7 @@ pub enum Event {
                /// [`ChannelManager::funding_transaction_generated`].
                ///
                /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
-               temporary_channel_id: [u8; 32],
+               temporary_channel_id: ChannelId,
                /// The counterparty's node_id, which you'll need to pass back into
                /// [`ChannelManager::funding_transaction_generated`].
                ///
@@ -458,7 +458,7 @@ pub enum Event {
                /// payment is to pay an invoice or to send a spontaneous payment.
                purpose: PaymentPurpose,
                /// The `channel_id` indicating over which channel we received the payment.
-               via_channel_id: Option<[u8; 32]>,
+               via_channel_id: Option<ChannelId>,
                /// The `user_channel_id` indicating over which channel we received the payment.
                via_user_channel_id: Option<u128>,
                /// The block height at which this payment will be failed back and will no longer be
@@ -508,6 +508,14 @@ pub enum Event {
                /// serialized prior to LDK version 0.0.117.
                sender_intended_total_msat: Option<u64>,
        },
+       /// Indicates a request for an invoice failed to yield a response in a reasonable amount of time
+       /// or was explicitly abandoned by [`ChannelManager::abandon_payment`].
+       ///
+       /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+       InvoiceRequestFailed {
+               /// The `payment_id` that would have been associated with the payment for the requested invoice.
+               payment_id: PaymentId,
+       },
        /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
        /// and we got back the payment preimage for it).
        ///
@@ -718,17 +726,17 @@ pub enum Event {
                /// The `channel_id` indicating which channel the spendable outputs belong to.
                ///
                /// This will always be `Some` for events generated by LDK versions 0.0.117 and above.
-               channel_id: Option<[u8; 32]>,
+               channel_id: Option<ChannelId>,
        },
        /// This event is generated when a payment has been successfully forwarded through us and a
        /// forwarding fee earned.
        PaymentForwarded {
                /// The incoming channel between the previous node and us. This is only `None` for events
                /// generated or serialized by versions prior to 0.0.107.
-               prev_channel_id: Option<[u8; 32]>,
+               prev_channel_id: Option<ChannelId>,
                /// The outgoing channel between the next node and us. This is only `None` for events
                /// generated or serialized by versions prior to 0.0.107.
-               next_channel_id: Option<[u8; 32]>,
+               next_channel_id: Option<ChannelId>,
                /// The fee, in milli-satoshis, which was earned as a result of the payment.
                ///
                /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
@@ -759,7 +767,7 @@ pub enum Event {
        /// [`Event::ChannelReady`] event.
        ChannelPending {
                /// The `channel_id` of the channel that is pending confirmation.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
                /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
                /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
@@ -772,7 +780,7 @@ pub enum Event {
                /// The `temporary_channel_id` this channel used to be known by during channel establishment.
                ///
                /// Will be `None` for channels created prior to LDK version 0.0.115.
-               former_temporary_channel_id: Option<[u8; 32]>,
+               former_temporary_channel_id: Option<ChannelId>,
                /// The `node_id` of the channel counterparty.
                counterparty_node_id: PublicKey,
                /// The outpoint of the channel's funding transaction.
@@ -784,7 +792,7 @@ pub enum Event {
        /// establishment.
        ChannelReady {
                /// The `channel_id` of the channel that is ready.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
                /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
                /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
@@ -811,7 +819,7 @@ pub enum Event {
        ChannelClosed  {
                /// The `channel_id` of the channel which has been closed. Note that on-chain transactions
                /// resolving the channel are likely still awaiting confirmation.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
                /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
                /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
@@ -838,7 +846,7 @@ pub enum Event {
        /// inputs for another purpose.
        DiscardFunding {
                /// The channel_id of the channel which has been closed.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The full transaction received from the user
                transaction: Transaction
        },
@@ -863,7 +871,7 @@ pub enum Event {
                ///
                /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
                /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
-               temporary_channel_id: [u8; 32],
+               temporary_channel_id: ChannelId,
                /// The node_id of the counterparty requesting to open the channel.
                ///
                /// When responding to the request, the `counterparty_node_id` should be passed
@@ -909,7 +917,7 @@ pub enum Event {
        /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
        HTLCHandlingFailed {
                /// The channel over which the HTLC was received.
-               prev_channel_id: [u8; 32],
+               prev_channel_id: ChannelId,
                /// Destination of the HTLC that failed to be processed.
                failed_next_destination: HTLCDestination,
        },
@@ -1148,6 +1156,12 @@ impl Writeable for Event {
                                        (8, funding_txo, required),
                                });
                        },
+                       &Event::InvoiceRequestFailed { ref payment_id } => {
+                               33u8.write(writer)?;
+                               write_tlv_fields!(writer, {
+                                       (0, payment_id, required),
+                               })
+                       },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
@@ -1279,7 +1293,7 @@ impl MaybeReadable for Event {
                        5u8 => {
                                let f = || {
                                        let mut outputs = WithoutLength(Vec::new());
-                                       let mut channel_id: Option<[u8; 32]> = None;
+                                       let mut channel_id: Option<ChannelId> = None;
                                        read_tlv_fields!(reader, {
                                                (0, outputs, required),
                                                (1, channel_id, option),
@@ -1335,7 +1349,7 @@ impl MaybeReadable for Event {
                        },
                        9u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut reason = UpgradableRequired(None);
                                        let mut user_channel_id_low_opt: Option<u64> = None;
                                        let mut user_channel_id_high_opt: Option<u64> = None;
@@ -1363,7 +1377,7 @@ impl MaybeReadable for Event {
                        },
                        11u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                                        read_tlv_fields!(reader, {
                                                (0, channel_id, required),
@@ -1474,7 +1488,7 @@ impl MaybeReadable for Event {
                        },
                        25u8 => {
                                let f = || {
-                                       let mut prev_channel_id = [0; 32];
+                                       let mut prev_channel_id = ChannelId::new_zero();
                                        let mut failed_next_destination_opt = UpgradableRequired(None);
                                        read_tlv_fields!(reader, {
                                                (0, prev_channel_id, required),
@@ -1490,7 +1504,7 @@ impl MaybeReadable for Event {
                        27u8 => Ok(None),
                        29u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut user_channel_id: u128 = 0;
                                        let mut counterparty_node_id = RequiredWrapper(None);
                                        let mut channel_type = RequiredWrapper(None);
@@ -1512,7 +1526,7 @@ impl MaybeReadable for Event {
                        },
                        31u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut user_channel_id: u128 = 0;
                                        let mut former_temporary_channel_id = None;
                                        let mut counterparty_node_id = RequiredWrapper(None);
@@ -1535,6 +1549,17 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
+                       33u8 => {
+                               let f = || {
+                                       _init_and_read_len_prefixed_tlv_fields!(reader, {
+                                               (0, payment_id, required),
+                                       });
+                                       Ok(Some(Event::InvoiceRequestFailed {
+                                               payment_id: payment_id.0.unwrap(),
+                                       }))
+                               };
+                               f()
+                       },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
                        // reads.
@@ -1667,7 +1692,7 @@ pub enum MessageSendEvent {
                /// The node_id of the node which should receive this message
                node_id: PublicKey,
                /// The message which should be sent.
-               msg: msgs::TxAddInput,
+               msg: msgs::TxAbort,
        },
        /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id.
        SendChannelReady {
diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs
new file mode 100644 (file)
index 0000000..826eaa8
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use bitcoin::secp256k1::Secp256k1;
+use crate::blinded_path::BlindedPath;
+use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
+use crate::events::MessageSendEventsProvider;
+use crate::ln::channelmanager;
+use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::ln::features::Bolt12InvoiceFeatures;
+use crate::ln::functional_test_utils::*;
+use crate::ln::outbound_payment::Retry;
+use crate::prelude::*;
+use crate::routing::router::{PaymentParameters, RouteParameters};
+use crate::util::config::UserConfig;
+
+#[test]
+fn one_hop_blinded_path() {
+       do_one_hop_blinded_path(true);
+       do_one_hop_blinded_path(false);
+}
+
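+// Sends a payment over a single-hop blinded path built by the recipient, then either claims or
+// fails it back depending on `success`.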
+fn do_one_hop_blinded_path(success: bool) {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let chan_upd = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0).0.contents;
+
+       let amt_msat = 5000;
+       let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(amt_msat), None);
+       let payee_tlvs = ReceiveTlvs {
+               payment_secret,
+               payment_constraints: PaymentConstraints {
+                       max_cltv_expiry: u32::max_value(),
+                       htlc_minimum_msat: chan_upd.htlc_minimum_msat,
+               },
+       };
+       let mut secp_ctx = Secp256k1::new();
+       let blinded_path = BlindedPath::one_hop_for_payment(
+               nodes[1].node.get_our_node_id(), payee_tlvs, &chanmon_cfgs[1].keys_manager, &secp_ctx
+       ).unwrap();
+
+       let route_params = RouteParameters {
+               payment_params: PaymentParameters::blinded(vec![blinded_path]),
+               final_value_msat: amt_msat
+       };
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(),
+       PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 1);
+       pass_along_route(&nodes[0], &[&[&nodes[1]]], amt_msat, payment_hash, payment_secret);
+       if success {
+               claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+       } else {
+               fail_payment(&nodes[0], &[&nodes[1]], payment_hash);
+       }
+}
+
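+// Sends an MPP payment split across two forwarding nodes, with both parts resolving to the same
+// one-hop blinded path to the recipient.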
+#[test]
+fn mpp_to_one_hop_blinded_path() {
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+       let mut secp_ctx = Secp256k1::new();
+
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       create_announced_chan_between_nodes(&nodes, 0, 2);
+       let chan_upd_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents;
+       create_announced_chan_between_nodes(&nodes, 2, 3).0.contents;
+
+       let amt_msat = 15_000_000;
+       let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+       let payee_tlvs = ReceiveTlvs {
+               payment_secret,
+               payment_constraints: PaymentConstraints {
+                       max_cltv_expiry: u32::max_value(),
+                       htlc_minimum_msat: chan_upd_1_3.htlc_minimum_msat,
+               },
+       };
+       let blinded_path = BlindedPath::one_hop_for_payment(
+               nodes[3].node.get_our_node_id(), payee_tlvs, &chanmon_cfgs[3].keys_manager, &secp_ctx
+       ).unwrap();
+
+       let bolt12_features: Bolt12InvoiceFeatures =
+               channelmanager::provided_invoice_features(&UserConfig::default()).to_context();
+       let route_params = RouteParameters {
+               payment_params: PaymentParameters::blinded(vec![blinded_path])
+                       .with_bolt12_features(bolt12_features).unwrap(),
+               final_value_msat: amt_msat,
+       };
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       check_added_monitors(&nodes[0], 2);
+
+       let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+
+       let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash.clone(),
+               Some(payment_secret), ev.clone(), false, None);
+
+       let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+       pass_along_path(&nodes[0], expected_route[1], amt_msat, payment_hash.clone(),
+               Some(payment_secret), ev.clone(), true, None);
+       claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
+}
index 18048d8efd87a50fea85cd037f375148b0f916c3..968de7b43b5e079d2dac2f522350900680e100ee 100644 (file)
@@ -450,7 +450,7 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
 /// channel basepoints via the new function, or they were obtained via
 /// CommitmentTransaction.trust().keys() because we trusted the source of the
 /// pre-calculated keys.
-#[derive(PartialEq, Eq, Clone)]
+#[derive(PartialEq, Eq, Clone, Debug)]
 pub struct TxCreationKeys {
        /// The broadcaster's per-commitment public key which was used to derive the other keys.
        pub per_commitment_point: PublicKey,
@@ -1028,7 +1028,7 @@ impl<'a> DirectedChannelTransactionParameters<'a> {
 /// Information needed to build and sign a holder's commitment transaction.
 ///
 /// The transaction is only signed once we are ready to broadcast.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct HolderCommitmentTransaction {
        inner: CommitmentTransaction,
        /// Our counterparty's signature for the transaction
@@ -1134,7 +1134,7 @@ impl HolderCommitmentTransaction {
 }
 
 /// A pre-built Bitcoin commitment transaction and its txid.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct BuiltCommitmentTransaction {
        /// The commitment transaction
        pub transaction: Transaction,
@@ -1305,7 +1305,7 @@ impl<'a> TrustedClosingTransaction<'a> {
 ///
 /// This class can be used inside a signer implementation to generate a signature given the relevant
 /// secret key.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct CommitmentTransaction {
        commitment_number: u64,
        to_broadcaster_value_sat: u64,
index 8e52af093d6c59e42a05d5a8e11d0307fcd8c367..33f4bbc8c26346a75af2b77880d2d7f8d0e06fa7 100644 (file)
@@ -20,10 +20,10 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
-use crate::ln::channel::AnnouncementSigsState;
+use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;
 use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::test_utils::TestBroadcaster;
@@ -92,7 +92,7 @@ fn test_monitor_and_persister_update_fail() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
 
        // Route an HTLC from node 0 to node 1 (but don't settle)
-       let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+       let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
 
        // Make a copy of the ChainMonitor so we can capture the error it returns on a
        // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
@@ -111,7 +111,7 @@ fn test_monitor_and_persister_update_fail() {
        let chain_mon = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -136,15 +136,18 @@ fn test_monitor_and_persister_update_fail() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Check that even though the persister is returning a InProgress,
-                       // because the update is bogus, ultimately the error that's returned
-                       // should be a PermanentFailure.
-                       if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-                       logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               // Check that even though the persister is returning an InProgress,
+                               // because the update is bogus, ultimately the error that's returned
+                               // should be a PermanentFailure.
+                               if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
+                               logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
 
        check_added_monitors!(nodes[0], 1);
@@ -283,7 +286,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -855,7 +858,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
        // Route a first payment that we'll fail backwards
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
        nodes[2].node.fail_htlc_backwards(&payment_hash_1);
@@ -1125,7 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
@@ -1341,7 +1344,7 @@ fn claim_while_disconnected_monitor_update_fail() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        // Forward a payment for B to claim
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -1460,12 +1463,12 @@ fn monitor_failed_no_reestablish_response() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        }
        {
                let mut node_1_per_peer_lock;
                let mut node_1_peer_state_lock;
-               get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        }
 
        // Route the payment and deliver the initial commitment_signed (with a monitor update failure
@@ -1641,7 +1644,7 @@ fn test_monitor_update_fail_claim() {
        // Rebalance a bit so that we can send backwards from 3 to 2.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
@@ -1760,7 +1763,7 @@ fn test_monitor_update_on_pending_forwards() {
        // Rebalance a bit so that we can send backwards from 3 to 1.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
        nodes[2].node.fail_htlc_backwards(&payment_hash_1);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
        check_added_monitors!(nodes[2], 1);
@@ -1832,7 +1835,7 @@ fn monitor_update_claim_fail_no_response() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        // Forward a payment for B to claim
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -2174,7 +2177,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
 
        let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
        assert_eq!(bs_txn.len(), 1);
@@ -2340,7 +2343,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        //
        // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
        // (c) will not be freed from the holding cell.
-       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
+       let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
 
        nodes[0].node.send_payment_with_route(&route, payment_hash_1,
                RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
@@ -2514,7 +2517,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        let mut as_raa = None;
        if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
@@ -2744,8 +2747,8 @@ fn double_temp_error() {
 
        let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // `claim_funds` results in a ChannelMonitorUpdate.
@@ -3035,15 +3038,15 @@ fn test_blocked_chan_preimage_release() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1).2;
-       create_announced_chan_between_nodes(&nodes, 1, 2).2;
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
 
        // Tee up two payments in opposite directions across nodes[1], one it sent to generate a
        // PaymentSent event and one it forwards.
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
 
        // Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
        nodes[2].node.claim_funds(payment_preimage_1);
@@ -3065,11 +3068,20 @@ fn test_blocked_chan_preimage_release() {
        let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
        check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
+       assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       // Finish the CS dance between nodes[0] and nodes[1].
-       do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
+       // Finish the CS dance between nodes[0] and nodes[1]. Note that until the events are handled,
+       // the update_fulfill_htlc + CS are held, even though the preimage is already on disk for the
+       // channel.
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+       assert!(a.is_none());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
        check_added_monitors(&nodes[1], 0);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 3);
@@ -3077,8 +3089,8 @@ fn test_blocked_chan_preimage_release() {
        if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
        if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
 
-       // The event processing should release the last RAA update.
-       check_added_monitors(&nodes[1], 1);
+       // The event processing should release the last RAA updates on both channels.
+       check_added_monitors(&nodes[1], 2);
 
        // When we fetch the next update the message getter will generate the next update for nodes[2],
        // generating a further monitor update.
@@ -3089,3 +3101,432 @@ fn test_blocked_chan_preimage_release() {
        do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
        expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
+
+fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
+       // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
+       // from the downstream channel, we immediately claim the HTLC on the upstream channel, before
+       // even doing a `commitment_signed` dance on the downstream channel. This implies that our
+       // `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
+       // money, then we write the update that resolves the downstream node claiming their money. This
+       // is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
+       // generated, but of course this may not be the case. For asynchronous update writes, we have
+       // to ensure monitor updates can block each other, preventing the inversion altogether.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+       // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+       // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+       // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+       let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+       let mut manager_b = Vec::new();
+       if !with_latest_manager {
+               manager_b = nodes[1].node.encode();
+       }
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors(&nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+       // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+       // for it since the monitor update is marked in-progress.
+       check_added_monitors(&nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
+       // won't get the preimage back when the nodes reconnect and will instead have to get it from
+       // the ChannelMonitor.
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       if complete_bc_commitment_dance {
+               let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+               nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+               check_added_monitors(&nodes[2], 1);
+               nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+               check_added_monitors(&nodes[2], 1);
+               let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+               // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
+               // preimage in the A <-> B channel, which will prevent it from persisting the
+               // `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage.
+               nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
+               check_added_monitors(&nodes[1], 0);
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       }
+
+       // Now reload node B
+       if with_latest_manager {
+               manager_b = nodes[1].node.encode();
+       }
+
+       let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+       reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+       if with_latest_manager {
+               // If we used the latest ChannelManager to reload from, we should have both channels still
+               // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+               // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+               // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+               // complete after reconnecting to our peers.
+               persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               nodes[1].node.timer_tick_occurred();
+               check_added_monitors(&nodes[1], 1);
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+               // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+               // the end, go ahead and do that, though the
+               // `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
+               // expect to *not* receive the final RAA ChannelMonitorUpdate.
+               if complete_bc_commitment_dance {
+                       reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+               } else {
+                       let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+                       reconnect_args.pending_responding_commitment_signed.1 = true;
+                       reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+                       reconnect_args.pending_raa = (false, true);
+                       reconnect_nodes(reconnect_args);
+               }
+
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+               // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+               // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+               // process.
+               let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+               nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+               // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
+               // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+               // channel.
+       } else {
+               // If the ChannelManager used in the reload was stale, check that the B <-> C channel was
+               // closed.
+               //
+               // Note that this will also process the ChannelMonitorUpdates which were queued up when we
+               // reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
+               // force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
+               // commitment update will be allowed to go out.
+               check_added_monitors(&nodes[1], 0);
+               persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
+               check_added_monitors(&nodes[1], 2);
+
+               nodes[1].node.timer_tick_occurred();
+               check_added_monitors(&nodes[1], 0);
+
+               // Don't bother to reconnect B to C - that channel has been closed. We don't need to
+               // exchange any messages here even though there's a pending commitment update because the
+               // ChannelMonitorUpdate hasn't yet completed.
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+               let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+               nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+               // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
+               // preimage (as it was a replay of the original ChannelMonitorUpdate from before we
+               // restarted). When we go to fetch the commitment transaction updates we'll poll the
+               // ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
+               // with the actual commitment transaction, which will allow us to fulfill the HTLC with
+               // node A.
+       }
+
+       let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+       check_added_monitors(&nodes[1], 1);
+
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+       do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
+
+       expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);
+
+       // Finally, check that the payment was, ultimately, seen as sent by node A.
+       expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
+#[test]
+fn test_inverted_mon_completion_order() {
+       do_test_inverted_mon_completion_order(true, true);
+       do_test_inverted_mon_completion_order(true, false);
+       do_test_inverted_mon_completion_order(false, true);
+       do_test_inverted_mon_completion_order(false, false);
+}
+
+fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
+       // Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
+       // is force-closed between when we generate the update on reload and when we go to handle it,
+       // or before the update is generated at all.
+
+       if !close_chans_before_reload && close_only_a {
+               // If we're not closing, it makes no sense to "only close A"
+               panic!();
+       }
+
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+       // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+       // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+       // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+       let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors(&nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+       // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+       // for it since the monitor update is marked in-progress.
+       check_added_monitors(&nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
+       // the preimage when the nodes reconnect, at which point we have to ensure we get it from the
+       // ChannelMonitor.
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+       let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+
+       if close_chans_before_reload {
+               if !close_only_a {
+                       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+                       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+                       check_closed_broadcast(&nodes[1], 1, true);
+                       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
+               }
+
+               chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+               check_closed_broadcast(&nodes[1], 1, true);
+               check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+       }
+
+       // Now reload node B
+       let manager_b = nodes[1].node.encode();
+       reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+       if close_chans_before_reload {
+               // If the channels were already closed, B will rebroadcast its closing transactions here.
+               let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+               if close_only_a {
+                       assert_eq!(bs_close_txn.len(), 2);
+               } else {
+                       assert_eq!(bs_close_txn.len(), 3);
+               }
+       }
+
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+       let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(as_closing_tx.len(), 1);
+
+       // In order to give A's closing transaction to B without processing background events first,
+       // use the _without_consistency_checks utility method. This is similar to connecting blocks
+       // during startup prior to the node being fully initialized.
+       mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+
+       // After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
+       // ChannelMonitor (possibly twice), even though the channel has since been closed.
+       check_added_monitors(&nodes[1], 0);
+       let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
+       if hold_post_reload_mon_update {
+               for _ in 0..mons_added {
+                       persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+               }
+       }
+       nodes[1].node.timer_tick_occurred();
+       check_added_monitors(&nodes[1], mons_added);
+
+       // Finally, check that B created a payment preimage transaction and close out the payment.
+       let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
+       let bs_preimage_tx = &bs_txn[0];
+       check_spends!(bs_preimage_tx, as_closing_tx[0]);
+
+       if !close_chans_before_reload {
+               check_closed_broadcast(&nodes[1], 1, true);
+               check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+       } else {
+               // Although we forwarded the payment a while ago, we don't want to process events too early or
+               // we'll run background tasks we wanted to test individually.
+               expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
+       }
+
+       mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
+       check_closed_broadcast(&nodes[0], 1, true);
+       expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+
+       if !close_chans_before_reload || close_only_a {
+               // Make sure the B<->C channel is still alive and well by sending a payment over it.
+               let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+               reconnect_args.pending_responding_commitment_signed.1 = true;
+               if !close_chans_before_reload {
+                       // TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
+                       // will consider the forwarded payment complete and allow the B<->C
+                       // `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
+                       // be allowed, and needs fixing.
+                       reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+               }
+               reconnect_args.pending_raa.1 = true;
+
+               reconnect_nodes(reconnect_args);
+               let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+               nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
+               expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
+               if !close_chans_before_reload {
+                       // Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
+                       // channel will fly, removing the payment preimage from it.
+                       check_added_monitors(&nodes[1], 1);
+               }
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+               send_payment(&nodes[1], &[&nodes[2]], 100_000);
+       }
+}
+
+#[test]
+fn test_durable_preimages_on_closed_channel() {
+       do_test_durable_preimages_on_closed_channel(true, true, true);
+       do_test_durable_preimages_on_closed_channel(true, true, false);
+       do_test_durable_preimages_on_closed_channel(true, false, true);
+       do_test_durable_preimages_on_closed_channel(true, false, false);
+       do_test_durable_preimages_on_closed_channel(false, false, true);
+       do_test_durable_preimages_on_closed_channel(false, false, false);
+}
+
+fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
+       // Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
+       // before restart, we run the monitor update completion action on startup.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+       // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+       // `update_fulfill_htlc`+`commitment_signed` we have a monitor update for both of B's channels.
+       // We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
+       // update pending, then reload B. At that point, the final monitor update on the B<->C channel
+       // is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors(&nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+       // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+       // for it since the monitor update is marked in-progress.
+       check_added_monitors(&nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Now step the Commitment Signed Dance between B and C and check that after the final RAA B
+       // doesn't let the preimage-removing monitor update fly.
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+       nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+       check_added_monitors(&nodes[2], 1);
+       nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+       check_added_monitors(&nodes[2], 1);
+
+       let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_final_raa);
+       check_added_monitors(&nodes[1], 0);
+
+       // Finally, reload node B and check that after we call `process_pending_events` once we realize
+       // we've completed the A<->B preimage-including monitor update and so can release the B<->C
+       // preimage-removing monitor update.
+       let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+       let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+       let manager_b = nodes[1].node.encode();
+       reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+       if close_during_reload {
+               // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
+               // (as learned about during the on-reload block connection).
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               check_closed_broadcast!(nodes[0], true);
+               check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+               let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+               mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+       }
+
+       let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
+       let mut events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
+       expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
+       if close_during_reload {
+               match events[0] {
+                       Event::ChannelClosed { .. } => {},
+                       _ => panic!(),
+               }
+               check_closed_broadcast!(nodes[1], true);
+       }
+
+       // Once we run event processing the monitor should free, check that it was indeed the B<->C
+       // channel which was updated.
+       check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
+       let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
+       assert!(bc_update_id != post_ev_bc_update_id);
+
+       // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
+       // fine.
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+       send_payment(&nodes[1], &[&nodes[2]], 100_000);
+}
+
+#[test]
+fn test_reload_mon_update_completion_actions() {
+       do_test_reload_mon_update_completion_actions(true);
+       do_test_reload_mon_update_completion_actions(false);
+}
index f68d7433e4a0c6ebcb257ff96ab3ca7d38477e25..796c041f8185d78d68972370726fdccb96fb4fd5 100644 (file)
@@ -22,7 +22,7 @@ use bitcoin::secp256k1::{PublicKey,SecretKey};
 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
 use bitcoin::secp256k1;
 
-use crate::ln::{PaymentPreimage, PaymentHash};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
@@ -532,7 +532,7 @@ pub(super) struct ReestablishResponses {
 /// channel's counterparty_node_id and channel_id).
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
-       Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
+       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>
 );
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -594,6 +594,9 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 /// exceeding this age limit will be force-closed and purged from memory.
 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
 
+/// Number of blocks needed for an output from a coinbase transaction to be spendable.
+pub(crate) const COINBASE_MATURITY: u32 = 100;
+
 struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
 }
@@ -602,6 +605,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
 });
 
+/// The `ChannelPhase` enum describes the current phase in the life of a lightning channel, with
+/// each of its variants containing an appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+       UnfundedOutboundV1(OutboundV1Channel<SP>),
+       UnfundedInboundV1(InboundV1Channel<SP>),
+       Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+       SP::Target: SignerProvider,
+       <SP::Target as SignerProvider>::Signer: ChannelSigner,
+{
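+       /// Returns an immutable reference to the `ChannelContext` common to all phases.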
+       pub fn context(&'a self) -> &'a ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(chan) => &chan.context,
+                       ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+                       ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+               }
+       }
+
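+       /// Returns a mutable reference to the `ChannelContext` common to all phases.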
+       pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+               }
+       }
+}
+
 /// Contains all state common to unfunded inbound/outbound channels.
 pub(super) struct UnfundedChannelContext {
        /// A counter tracking how many ticks have elapsed since this unfunded channel was
@@ -637,8 +669,11 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        user_id: u128,
 
-       channel_id: [u8; 32],
-       temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
+       /// The current channel ID.
+       channel_id: ChannelId,
+       /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
+       /// Will be `None` for channels created prior to 0.0.115.
+       temporary_channel_id: Option<ChannelId>,
        channel_state: u32,
 
        // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
@@ -957,14 +992,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        // Public utilities:
 
-       pub fn channel_id(&self) -> [u8; 32] {
+       pub fn channel_id(&self) -> ChannelId {
                self.channel_id
        }
 
        // Return the `temporary_channel_id` used during channel establishment.
        //
        // Will return `None` for channels created prior to LDK version 0.0.115.
-       pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+       pub fn temporary_channel_id(&self) -> Option<ChannelId> {
                self.temporary_channel_id
        }
 
@@ -1232,7 +1267,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
                        commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
                        get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
-                       log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
+                       &self.channel_id,
+                       if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
 
                macro_rules! get_htlc_in_commitment {
                        ($htlc: expr, $offered: expr) => {
@@ -2029,11 +2065,6 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur
        (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 }
 
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_funding_signed on an
-// inbound channel.
-//
 // Holder designates channel data owned for the benefit of the user client.
 // Counterparty designates channel data owned by the other channel participant entity.
 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
@@ -2222,7 +2253,7 @@ impl<SP: Deref> Channel<SP> where
                                        InboundHTLCState::LocalRemoved(ref reason) => {
                                                if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
                                                } else {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, log_bytes!(self.context.channel_id()));
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                }
                                                return UpdateFulfillFetch::DuplicateClaim {};
@@ -2275,7 +2306,7 @@ impl<SP: Deref> Channel<SP> where
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
@@ -2285,7 +2316,7 @@ impl<SP: Deref> Channel<SP> where
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
+                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
@@ -2303,7 +2334,7 @@ impl<SP: Deref> Channel<SP> where
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
                                return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
                        }
-                       log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, log_bytes!(self.context.channel_id));
+                       log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
                }
 
@@ -2442,7 +2473,7 @@ impl<SP: Deref> Channel<SP> where
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+                       log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
                                htlc_id: htlc_id_arg,
                                err_packet,
@@ -2450,7 +2481,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
                {
                        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
@@ -2493,7 +2524,7 @@ impl<SP: Deref> Channel<SP> where
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
                let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
                let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
@@ -2547,7 +2578,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.cur_holder_commitment_transaction_number -= 1;
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
-               log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
+               log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 
                let need_channel_ready = self.check_get_channel_ready(0).is_some();
                self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
@@ -2621,7 +2652,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
                self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
 
-               log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
+               log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
 
                Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
        }
@@ -2749,7 +2780,7 @@ impl<SP: Deref> Channel<SP> where
                        if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
                                // Note that if the pending_forward_status is not updated here, then it's because we're already failing
                                // the HTLC, i.e. its status is already set to failing.
-                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
+                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                } else {
@@ -2877,7 +2908,7 @@ impl<SP: Deref> Channel<SP> where
                        log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
                                log_bytes!(msg.signature.serialize_compact()[..]),
                                log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
-                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
                                return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
                        }
@@ -2947,7 +2978,7 @@ impl<SP: Deref> Channel<SP> where
                                let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
-                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
+                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
                                if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
                                        return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
                                }
@@ -2991,7 +3022,7 @@ impl<SP: Deref> Channel<SP> where
                        } else { None };
                        if let Some(forward_info) = new_forward {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
-                                       &htlc.payment_hash, log_bytes!(self.context.channel_id));
+                                       &htlc.payment_hash, &self.context.channel_id);
                                htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
                                need_commitment = true;
                        }
@@ -3000,7 +3031,7 @@ impl<SP: Deref> Channel<SP> where
                for htlc in self.context.pending_outbound_htlcs.iter_mut() {
                        if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
-                                       &htlc.payment_hash, log_bytes!(self.context.channel_id));
+                                       &htlc.payment_hash, &self.context.channel_id);
                                // Grab the preimage, if it exists, instead of cloning
                                let mut reason = OutboundHTLCOutcome::Success(None);
                                mem::swap(outcome, &mut reason);
@@ -3050,7 +3081,7 @@ impl<SP: Deref> Channel<SP> where
                                monitor_update.updates.append(&mut additional_update.updates);
                        }
                        log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
-                               log_bytes!(self.context.channel_id));
+                               &self.context.channel_id);
                        return Ok(self.push_ret_blockable_mon_update(monitor_update));
                }
 
@@ -3067,7 +3098,7 @@ impl<SP: Deref> Channel<SP> where
                } else { false };
 
                log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-                       log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+                       &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
                self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
                return Ok(self.push_ret_blockable_mon_update(monitor_update));
        }
@@ -3096,7 +3127,7 @@ impl<SP: Deref> Channel<SP> where
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
-                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
+                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
 
                        let mut monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
@@ -3127,8 +3158,7 @@ impl<SP: Deref> Channel<SP> where
                                                        Err(e) => {
                                                                match e {
                                                                        ChannelError::Ignore(ref msg) => {
-                                                                               log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
-                                                                                       &payment_hash, msg, log_bytes!(self.context.channel_id()));
+                                                                               log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
                                                                                // If we fail to send here, then this HTLC should
                                                                                // be failed backwards. Failing to send here
                                                                                // indicates that this HTLC may keep being put back
@@ -3194,7 +3224,7 @@ impl<SP: Deref> Channel<SP> where
                        monitor_update.updates.append(&mut additional_update.updates);
 
                        log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
-                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+                               &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
                                update_add_count, update_fulfill_count, update_fail_count);
 
                        self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
@@ -3283,7 +3313,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
                }
 
-               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
                let mut to_forward_infos = Vec::new();
                let mut revoked_htlcs = Vec::new();
                let mut finalized_claimed_htlcs = Vec::new();
@@ -3428,7 +3458,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.context.monitor_pending_failures.append(&mut revoked_htlcs);
                        self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
-                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
+                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
                        return_with_htlcs_to_fail!(Vec::new());
                }
 
@@ -3440,7 +3470,7 @@ impl<SP: Deref> Channel<SP> where
                                monitor_update.updates.append(&mut additional_update.updates);
 
                                log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
-                                       log_bytes!(self.context.channel_id()), release_state_str);
+                                       &self.context.channel_id(), release_state_str);
 
                                self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                                return_with_htlcs_to_fail!(htlcs_to_fail);
@@ -3455,7 +3485,7 @@ impl<SP: Deref> Channel<SP> where
                                        monitor_update.updates.append(&mut additional_update.updates);
 
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
-                                               log_bytes!(self.context.channel_id()),
+                                               &self.context.channel_id(),
                                                update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
                                                release_state_str);
 
@@ -3463,7 +3493,7 @@ impl<SP: Deref> Channel<SP> where
                                        return_with_htlcs_to_fail!(htlcs_to_fail);
                                } else {
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
-                                               log_bytes!(self.context.channel_id()), release_state_str);
+                                               &self.context.channel_id(), release_state_str);
 
                                        self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                                        return_with_htlcs_to_fail!(htlcs_to_fail);
@@ -3625,7 +3655,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.sent_message_awaiting_response = None;
 
                self.context.channel_state |= ChannelState::PeerDisconnected as u32;
-               log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
@@ -3728,7 +3758,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.monitor_pending_commitment_signed = false;
                let order = self.context.resend_order.clone();
                log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
-                       log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+                       &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
@@ -3840,7 +3870,7 @@ impl<SP: Deref> Channel<SP> where
                } else { None };
 
                log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
-                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+                               &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
                msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -3896,8 +3926,8 @@ impl<SP: Deref> Channel<SP> where
                        if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
                                macro_rules! log_and_panic {
                                        ($err_msg: expr) => {
-                                               log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
-                                               panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
+                                               log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
+                                               panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
                                        }
                                }
                                log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
@@ -3996,9 +4026,9 @@ impl<SP: Deref> Channel<SP> where
 
                if msg.next_local_commitment_number == next_counterparty_commitment_number {
                        if required_revoke.is_some() {
-                               log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
                        } else {
-                               log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
                        }
 
                        Ok(ReestablishResponses {
@@ -4009,9 +4039,9 @@ impl<SP: Deref> Channel<SP> where
                        })
                } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
                        if required_revoke.is_some() {
-                               log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
                        } else {
-                               log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
                        }
 
                        if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
@@ -4731,12 +4761,14 @@ impl<SP: Deref> Channel<SP> where
                                                        return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
                                                } else {
                                                        if self.context.is_outbound() {
-                                                               for input in tx.input.iter() {
-                                                                       if input.witness.is_empty() {
-                                                                               // We generated a malleable funding transaction, implying we've
-                                                                               // just exposed ourselves to funds loss to our counterparty.
-                                                                               #[cfg(not(fuzzing))]
-                                                                               panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+                                                               if !tx.is_coin_base() {
+                                                                       for input in tx.input.iter() {
+                                                                               if input.witness.is_empty() {
+                                                                                       // We generated a malleable funding transaction, implying we've
+                                                                                       // just exposed ourselves to funds loss to our counterparty.
+                                                                                       #[cfg(not(fuzzing))]
+                                                                                       panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+                                                                               }
                                                                        }
                                                                }
                                                        }
@@ -4747,19 +4779,26 @@ impl<SP: Deref> Channel<SP> where
                                                                Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
                                                        }
                                                }
+                                               // If this is a coinbase transaction and not a 0-conf channel
+                                               // we should update our min_depth to 100 to handle coinbase maturity
+                                               if tx.is_coin_base() &&
+                                                       self.context.minimum_depth.unwrap_or(0) > 0 &&
+                                                       self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+                                                       self.context.minimum_depth = Some(COINBASE_MATURITY);
+                                               }
                                        }
                                        // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
                                        if let Some(channel_ready) = self.check_get_channel_ready(height) {
-                                               log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
+                                               log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                                                let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
                                                return Ok((Some(channel_ready), announcement_sigs));
                                        }
                                }
                                for inp in tx.input.iter() {
                                        if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
-                                               log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
+                                               log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
                                                return Err(ClosureReason::CommitmentTxConfirmed);
                                        }
                                }
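Restating the coinbase rule above as a standalone sketch (not part of the diff; `bump_min_depth_for_coinbase` is a hypothetical helper): a `minimum_depth` of 0 marks a 0-conf channel and is left untouched, otherwise the depth is raised to the 100-block coinbase maturity so the funding output is spendable before `channel_ready` is sent.

    fn bump_min_depth_for_coinbase(funding_is_coinbase: bool, minimum_depth: &mut Option<u32>) {
        const COINBASE_MATURITY: u32 = 100;
        let current = minimum_depth.unwrap_or(0);
        // 0-conf channels opt out of depth checks entirely; everything else must wait
        // at least COINBASE_MATURITY blocks when funded by a coinbase output.
        if funding_is_coinbase && current > 0 && current < COINBASE_MATURITY {
            *minimum_depth = Some(COINBASE_MATURITY);
        }
    }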
@@ -4821,7 +4860,7 @@ impl<SP: Deref> Channel<SP> where
                        let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
                                self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
                        } else { None };
-                       log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
+                       log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                        return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
 
@@ -4852,7 +4891,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
-                       log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
+                       log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
                        assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
                        assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
@@ -4962,7 +5001,7 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
-               log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
                let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
                        Ok(a) => a,
                        Err(e) => {
@@ -5097,10 +5136,10 @@ impl<SP: Deref> Channel<SP> where
                let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
                let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
                        let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
-                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
+                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
                        remote_last_secret
                } else {
-                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
+                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
                        [0;32]
                };
                self.mark_awaiting_response();
@@ -5381,14 +5420,14 @@ impl<SP: Deref> Channel<SP> where
                                        log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
                                                encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
                                                &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
-                                               log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+                                               log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
 
                                        for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
                                                log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
                                                        encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
                                                        encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
                                                        log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
-                                                       log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+                                                       log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
                                        }
                                }
 
@@ -5636,7 +5675,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
                };
 
-               let temporary_channel_id = entropy_source.get_secure_random_bytes();
+               let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
 
                Ok(Self {
                        context: ChannelContext {
@@ -5818,6 +5857,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                self.context.channel_state = ChannelState::FundingCreated as u32;
                self.context.channel_id = funding_txo.to_channel_id();
+
+               // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
+               // We can skip this if it is a zero-conf channel.
+               if funding_transaction.is_coin_base() &&
+                       self.context.minimum_depth.unwrap_or(0) > 0 &&
+                       self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+                       self.context.minimum_depth = Some(COINBASE_MATURITY);
+               }
+
                self.context.funding_transaction = Some(funding_transaction);
 
                let channel = Channel {
@@ -6484,7 +6532,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
                                log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
                                encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
-                               encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+                               encode::serialize_hex(&funding_script), &self.context.channel_id());
                        secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
 
@@ -6494,7 +6542,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
                match &self.context.holder_signer {
                        // TODO (arik): move match into calling method for Taproot
@@ -6588,7 +6636,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
 
-               log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
+               log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
 
                // Promote the channel to a full-fledged one now that we have updated the state and have a
                // `ChannelMonitor`.
@@ -7258,7 +7306,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let mut user_id_high_opt: Option<u64> = None;
                let mut channel_keys_id: Option<[u8; 32]> = None;
-               let mut temporary_channel_id: Option<[u8; 32]> = None;
+               let mut temporary_channel_id: Option<ChannelId> = None;
                let mut holder_max_accepted_htlcs: Option<u16> = None;
 
                let mut blocked_monitor_updates = Some(Vec::new());
diff --git a/lightning/src/ln/channel_id.rs b/lightning/src/ln/channel_id.rs
new file mode 100644 (file)
index 0000000..cdbcd30
--- /dev/null
@@ -0,0 +1,143 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! ChannelId definition.
+
+use crate::ln::msgs::DecodeError;
+use crate::sign::EntropySource;
+use crate::util::ser::{Readable, Writeable, Writer};
+
+use bitcoin::hashes::hex::ToHex;
+
+use crate::io;
+use crate::prelude::*;
+use core::fmt;
+use core::ops::Deref;
+
+/// A unique 32-byte identifier for a channel.
+/// Depending on how the ID is generated, two varieties are distinguished
+/// (but both are stored as 32 bytes):
+///   _v1_ and _temporary_.
+/// A _v1_ channel ID is derived from the funding transaction outpoint (txid & output index).
+/// A _temporary_ ID is generated randomly.
+/// (A later, revocation-point-based _v2_ is a possibility.)
+/// The variety (context) is not stored; it is only relevant at creation.
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct ChannelId(pub [u8; 32]);
+
+impl ChannelId {
+       /// Create a _v1_ channel ID from a funding transaction ID and output index.
+       pub fn v1_from_funding_txid(txid: &[u8; 32], output_index: u16) -> Self {
+               let mut res = [0; 32];
+               res[..].copy_from_slice(&txid[..]);
+               res[30] ^= ((output_index >> 8) & 0xff) as u8;
+               res[31] ^= ((output_index >> 0) & 0xff) as u8;
+               Self(res)
+       }
+
+       /// Create a _temporary_ channel ID randomly, based on an entropy source.
+       pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self
+       where ES::Target: EntropySource {
+               Self(entropy_source.get_secure_random_bytes())
+       }
+
+       /// Generic constructor; create a new channel ID from the provided data.
+       /// Use a more specific `*_from_*` constructor when possible.
+       pub fn from_bytes(data: [u8; 32]) -> Self {
+               Self(data)
+       }
+
+       /// Create a channel ID consisting of all-zeros data (e.g. when uninitialized or a placeholder).
+       pub fn new_zero() -> Self {
+               Self([0; 32])
+       }
+
+       /// Check whether the ID consists of all zeros (i.e. is uninitialized).
+       pub fn is_zero(&self) -> bool {
+               self.0[..] == [0; 32]
+       }
+}
+
+impl Writeable for ChannelId {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.0.write(w)
+       }
+}
+
+impl Readable for ChannelId {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let buf: [u8; 32] = Readable::read(r)?;
+               Ok(ChannelId(buf))
+       }
+}
+
+impl ToHex for ChannelId {
+       fn to_hex(&self) -> String {
+               self.0.to_hex()
+       }
+}
+
+impl fmt::Display for ChannelId {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               crate::util::logger::DebugBytes(&self.0).fmt(f)
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use crate::ln::ChannelId;
+       use crate::util::ser::{Readable, Writeable};
+       use crate::util::test_utils;
+       use bitcoin::hashes::hex::ToHex;
+       use crate::prelude::*;
+       use crate::io;
+
+       #[test]
+       fn test_channel_id_v1_from_funding_txid() {
+               let channel_id = ChannelId::v1_from_funding_txid(&[2; 32], 1);
+               assert_eq!(channel_id.to_hex(), "0202020202020202020202020202020202020202020202020202020202020203");
+       }
+
+       #[test]
+       fn test_channel_id_new_from_data() {
+               let data: [u8; 32] = [2; 32];
+               let channel_id = ChannelId::from_bytes(data.clone());
+               assert_eq!(channel_id.0, data);
+       }
+
+       #[test]
+       fn test_channel_id_equals() {
+               let channel_id11 = ChannelId::v1_from_funding_txid(&[2; 32], 2);
+               let channel_id12 = ChannelId::v1_from_funding_txid(&[2; 32], 2);
+               let channel_id21 = ChannelId::v1_from_funding_txid(&[2; 32], 42);
+               assert_eq!(channel_id11, channel_id12);
+               assert_ne!(channel_id11, channel_id21);
+       }
+
+       #[test]
+       fn test_channel_id_write_read() {
+               let data: [u8; 32] = [2; 32];
+               let channel_id = ChannelId::from_bytes(data.clone());
+
+               let mut w = test_utils::TestVecWriter(Vec::new());
+               channel_id.write(&mut w).unwrap();
+
+               let channel_id_2 = ChannelId::read(&mut io::Cursor::new(&w.0)).unwrap();
+               assert_eq!(channel_id_2, channel_id);
+               assert_eq!(channel_id_2.0, data);
+       }
+
+       #[test]
+       fn test_channel_id_display() {
+               let channel_id = ChannelId::v1_from_funding_txid(&[2; 32], 1);
+               assert_eq!(format!("{}", &channel_id), "0202020202020202020202020202020202020202020202020202020202020203");
+       }
+}
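Illustrative usage of the new type (a sketch, not part of the diff; assumes `ChannelId` is imported as `use crate::ln::ChannelId;`, mirroring the imports elsewhere in this patch):

    fn channel_id_examples() {
        // A v1 ID is the funding txid with the big-endian output index XORed into its
        // last two bytes: for an all-0x02 txid and output index 1, byte 31 becomes 0x03.
        let v1 = ChannelId::v1_from_funding_txid(&[2u8; 32], 1);
        assert_eq!(v1.0[31], 0x03);
        assert!(!v1.is_zero());

        // The all-zero ID is reserved as an uninitialized/placeholder value.
        assert!(ChannelId::new_zero().is_zero());
    }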
index 55980dd7acfb48036c197fb02fe86c71d9ee6117..90451d59d66d635821ffbe3ce32790d471290c6c 100644 (file)
@@ -39,8 +39,8 @@ use crate::events;
 use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
-use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
+use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -177,7 +177,7 @@ pub(super) enum HTLCForwardInfo {
 }
 
 /// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
        // Note that this may be an outbound SCID alias for the associated channel.
        short_channel_id: u64,
@@ -233,11 +233,17 @@ impl From<&ClaimableHTLC> for events::ClaimedHTLC {
        }
 }
 
-/// A payment identifier used to uniquely identify a payment to LDK.
+/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+/// a payment and ensure idempotency in LDK.
 ///
 /// This is not exported to bindings users as we just use [u8; 32] directly
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentId(pub [u8; 32]);
+pub struct PaymentId(pub [u8; Self::LENGTH]);
+
+impl PaymentId {
+       /// Number of bytes in the id.
+       pub const LENGTH: usize = 32;
+}
 
 impl Writeable for PaymentId {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
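Since `PaymentId` exists to make payments idempotent, callers typically derive it deterministically from something stable about the payment. A minimal sketch (not part of the diff) assuming the crate's `PaymentHash` and `PaymentId` types; reusing the payment hash bytes is only one possible choice of stable identifier:

    fn payment_id_for(payment_hash: &PaymentHash) -> PaymentId {
        // Both types are 32-byte newtypes, so the hash bytes can be reused directly.
        // Retrying with the same id makes LDK treat the attempts as one payment.
        debug_assert_eq!(PaymentId::LENGTH, 32);
        PaymentId(payment_hash.0)
    }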
@@ -277,7 +283,7 @@ impl Readable for InterceptId {
        }
 }
 
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 /// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
 pub(crate) enum SentHTLCId {
        PreviousHopData { short_channel_id: u64, htlc_id: u64 },
@@ -308,7 +314,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 
 /// Tracks the inbound corresponding to an outbound HTLC
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum HTLCSource {
        PreviousHopData(HTLCPreviousHopData),
        OutboundRoute {
@@ -413,13 +419,13 @@ impl Into<u16> for FailureCode {
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
-       chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
+       chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
        channel_capacity: Option<u64>,
 }
 impl MsgHandleErrInternal {
        #[inline]
-       fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
+       fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
                Self {
                        err: LightningError {
                                err: err.clone(),
@@ -440,7 +446,7 @@ impl MsgHandleErrInternal {
                Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
                Self {
                        err: LightningError {
                                err: err.clone(),
@@ -457,7 +463,7 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
+       fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
                Self {
                        err: match err {
                                ChannelError::Warn(msg) =>  LightningError {
@@ -489,6 +495,10 @@ impl MsgHandleErrInternal {
                        channel_capacity: None,
                }
        }
+
+       fn closes_channel(&self) -> bool {
+               self.chan_id.is_some()
+       }
 }
 
 /// We hold back HTLCs we intend to relay for a random interval greater than this (see
@@ -582,7 +592,7 @@ enum BackgroundEvent {
        /// on a channel.
        MonitorUpdatesComplete {
                counterparty_node_id: PublicKey,
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
        },
 }
 
@@ -643,14 +653,13 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
        /// durably to disk.
        ForwardedPaymentInboundClaim {
                /// The upstream channel ID (i.e. the inbound edge).
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The HTLC ID on the inbound edge.
                htlc_id: u64,
        },
 }
 
 impl RAAMonitorUpdateBlockingAction {
-       #[allow(unused)]
        fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
                Self::ForwardedPaymentInboundClaim {
                        channel_id: prev_hop.outpoint.to_channel_id(),
@@ -666,29 +675,17 @@ impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
 
 /// State we hold per-peer.
 pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
-       /// `channel_id` -> `Channel`.
-       ///
-       /// Holds all funded channels where the peer is the counterparty.
-       pub(super) channel_by_id: HashMap<[u8; 32], Channel<SP>>,
-       /// `temporary_channel_id` -> `OutboundV1Channel`.
-       ///
-       /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
-       /// been assigned a `channel_id`, the entry in this map is removed and one is created in
-       /// `channel_by_id`.
-       pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<SP>>,
-       /// `temporary_channel_id` -> `InboundV1Channel`.
-       ///
-       /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
-       /// been assigned a `channel_id`, the entry in this map is removed and one is created in
-       /// `channel_by_id`.
-       pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<SP>>,
+       /// `channel_id` -> `ChannelPhase`
+       ///
+       /// Holds all channels where the peer is the counterparty, each within its corresponding `ChannelPhase`.
+       pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
        /// `temporary_channel_id` -> `InboundChannelRequest`.
        ///
        /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
        /// the peer is the counterparty. If the channel is accepted, then the entry in this table is
       /// removed, and an InboundV1Channel is created and placed in the `channel_by_id` table. If
        /// the channel is rejected, then the entry is simply removed.
-       pub(super) inbound_channel_request_by_id: HashMap<[u8; 32], InboundChannelRequest>,
+       pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
        /// The latest `InitFeatures` we heard from the peer.
        latest_features: InitFeatures,
        /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
@@ -715,12 +712,12 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
        /// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
        /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
        /// duplicates do not occur, so such channels should fail without a monitor update completing.
-       monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+       monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
        /// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
        /// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
        /// will remove a preimage that needs to be durably in an upstream channel first), we put an
        /// entry here to note that the channel with the key's ID is blocked on a set of actions.
-       actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+       actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
        /// [`ChannelMessageHandler::peer_disconnected`]).
@@ -735,23 +732,19 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
-               self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+               self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+                       && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
 
        // Returns a count of all channels we have with this peer, including unfunded channels.
        fn total_channel_count(&self) -> usize {
-               self.channel_by_id.len() +
-                       self.outbound_v1_channel_by_id.len() +
-                       self.inbound_v1_channel_by_id.len() +
-                       self.inbound_channel_request_by_id.len()
+               self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
        }
 
        // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
-       fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
+       fn has_channel(&self, channel_id: &ChannelId) -> bool {
                self.channel_by_id.contains_key(channel_id) ||
-                       self.outbound_v1_channel_by_id.contains_key(channel_id) ||
-                       self.inbound_v1_channel_by_id.contains_key(channel_id) ||
                        self.inbound_channel_request_by_id.contains_key(channel_id)
        }
 }
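
A minimal sketch (not part of this patch) of working with the consolidated map: `channel_by_id` now holds every channel wrapped in a `ChannelPhase`, so callers that only care about funded channels filter on `ChannelPhase::Funded`, mirroring the peer-state check above. The helper name is hypothetical.

fn funded_channel_count<SP: Deref>(peer_state: &PeerState<SP>) -> usize
where SP::Target: SignerProvider {
        peer_state.channel_by_id.values()
                .filter(|phase| matches!(phase, ChannelPhase::Funded(_)))
                .count()
}
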
@@ -1099,7 +1092,7 @@ where
        /// required to access the channel with the `counterparty_node_id`.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+       id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
 
        /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
        ///
@@ -1113,9 +1106,9 @@ where
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        #[cfg(test)]
-       pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+       pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
        #[cfg(not(test))]
-       short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+       short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
 
        our_network_pubkey: PublicKey,
 
@@ -1196,7 +1189,8 @@ where
 
        background_events_processed_since_startup: AtomicBool,
 
-       persistence_notifier: Notifier,
+       event_persist_notifier: Notifier,
+       needs_persist_flag: AtomicBool,
 
        entropy_source: ES,
        node_signer: NS,
@@ -1225,7 +1219,8 @@ pub struct ChainParameters {
 #[must_use]
 enum NotifyOption {
        DoPersist,
-       SkipPersist,
+       SkipPersistHandleEvents,
+       SkipPersistNoEvents,
 }
 
 /// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
@@ -1238,43 +1233,75 @@ enum NotifyOption {
 /// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
 /// notify or not based on whether relevant changes have been made, providing a closure to
 /// `optionally_notify` which returns a `NotifyOption`.
-struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
-       persistence_notifier: &'a Notifier,
+struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
+       event_persist_notifier: &'a Notifier,
+       needs_persist_flag: &'a AtomicBool,
        should_persist: F,
        // We hold onto this result so the lock doesn't get released immediately.
        _read_guard: RwLockReadGuard<'a, ()>,
 }
 
 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
-       fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+       /// Notifies any waiters and indicates that we need to persist, in addition to possibly having
+       /// events to handle.
+       ///
+       /// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
+       /// other cases where losing the changes on restart may result in a force-close or otherwise
+       /// isn't ideal.
+       fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
+               Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
+       }
+
+       fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
+       -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
                let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
-               let _ = cm.get_cm().process_background_events(); // We always persist
+               let force_notify = cm.get_cm().process_background_events();
 
                PersistenceNotifierGuard {
-                       persistence_notifier: &cm.get_cm().persistence_notifier,
-                       should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+                       event_persist_notifier: &cm.get_cm().event_persist_notifier,
+                       needs_persist_flag: &cm.get_cm().needs_persist_flag,
+                       should_persist: move || {
+                               // Pick the "most" action between `persist_check` and the background events
+                               // processing and return that.
+                               let notify = persist_check();
+                               match (notify, force_notify) {
+                                       (NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
+                                       (_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
+                                       (NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
+                                       (_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
+                                       _ => NotifyOption::SkipPersistNoEvents,
+                               }
+                       },
                        _read_guard: read_guard,
                }
-
        }
 
        /// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
-       /// [`ChannelManager::process_background_events`] MUST be called first.
-       fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
-               let read_guard = lock.read().unwrap();
+       /// [`ChannelManager::process_background_events`] MUST be called first (or
+       /// [`Self::optionally_notify`] used).
+       fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
+       (cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+               let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
 
                PersistenceNotifierGuard {
-                       persistence_notifier: notifier,
+                       event_persist_notifier: &cm.get_cm().event_persist_notifier,
+                       needs_persist_flag: &cm.get_cm().needs_persist_flag,
                        should_persist: persist_check,
                        _read_guard: read_guard,
                }
        }
 }
 
-impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
+impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
        fn drop(&mut self) {
-               if (self.should_persist)() == NotifyOption::DoPersist {
-                       self.persistence_notifier.notify();
+               match (self.should_persist)() {
+                       NotifyOption::DoPersist => {
+                               self.needs_persist_flag.store(true, Ordering::Release);
+                               self.event_persist_notifier.notify()
+                       },
+                       NotifyOption::SkipPersistHandleEvents =>
+                               self.event_persist_notifier.notify(),
+                       NotifyOption::SkipPersistNoEvents => {},
                }
        }
 }
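
A minimal sketch (not part of this patch) of how a handler drives the reworked guard: the closure passed to `optionally_notify` returns a `NotifyOption`, and on drop the guard either sets `needs_persist_flag` and wakes waiters, wakes waiters without requesting persistence, or does nothing. The method below is a hypothetical member of `ChannelManager`.

fn handle_gossip_only_message(&self) {
        let _guard = PersistenceNotifierGuard::optionally_notify(self, || {
                // Nothing here must survive a restart, but user events may have been queued,
                // so wake event waiters without requesting persistence.
                NotifyOption::SkipPersistHandleEvents
        });
        // ... handle the message while the `total_consistency_lock` read guard is held ...
}
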
@@ -1336,11 +1363,6 @@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_G
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
 
-/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the
-/// idempotency of payments by [`PaymentId`]. See
-/// [`OutboundPayments::remove_stale_resolved_payments`].
-pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
-
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
 /// until we mark the channel disabled and gossip the update.
 pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
@@ -1417,7 +1439,7 @@ pub struct ChannelDetails {
        /// thereafter this is the txid of the funding transaction xor the funding transaction output).
        /// Note that this means this value is *not* persistent - it can change once during the
        /// lifetime of the channel.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// Parameters which apply to our counterparty. See individual fields for more information.
        pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
@@ -1683,8 +1705,17 @@ pub enum ChannelShutdownState {
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
 #[derive(Debug, PartialEq)]
 pub enum RecentPaymentDetails {
+       /// When an invoice was requested and thus a payment has not yet been sent.
+       AwaitingInvoice {
+               /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+               /// a payment and ensure idempotency in LDK.
+               payment_id: PaymentId,
+       },
        /// When a payment is still being sent and awaiting successful delivery.
        Pending {
+               /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+               /// a payment and ensure idempotency in LDK.
+               payment_id: PaymentId,
                /// Hash of the payment that is currently being sent but has yet to be fulfilled or
                /// abandoned.
                payment_hash: PaymentHash,
@@ -1696,6 +1727,9 @@ pub enum RecentPaymentDetails {
        /// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
        /// payment is removed from tracking.
        Fulfilled {
+               /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+               /// a payment and ensure idempotency in LDK.
+               payment_id: PaymentId,
                /// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
                /// made before LDK version 0.0.104.
                payment_hash: Option<PaymentHash>,
@@ -1704,6 +1738,9 @@ pub enum RecentPaymentDetails {
        /// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
        /// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
        Abandoned {
+               /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+               /// a payment and ensure idempotency in LDK.
+               payment_id: PaymentId,
                /// Hash of the payment that we have given up trying to send.
                payment_hash: PaymentHash,
        },
@@ -1806,45 +1843,55 @@ macro_rules! update_maps_on_chan_removal {
 }
 
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
-macro_rules! convert_chan_err {
-       ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
+macro_rules! convert_chan_phase_err {
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
                match $err {
                        ChannelError::Warn(msg) => {
-                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
                        },
                        ChannelError::Ignore(msg) => {
-                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
                        },
                        ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, &$channel.context);
+                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
+                               update_maps_on_chan_removal!($self, $channel.context);
                                let shutdown_res = $channel.context.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
-                                       shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis()))
+                               let user_id = $channel.context.get_user_id();
+                               let channel_capacity_satoshis = $channel.context.get_value_satoshis();
+
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
+                                       shutdown_res, $channel_update, channel_capacity_satoshis))
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
-               match $err {
-                       // We should only ever have `ChannelError::Close` when unfunded channels error.
-                       // In any case, just close the channel.
-                       ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, &$channel_context);
-                               let shutdown_res = $channel_context.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
-                                       shutdown_res, None, $channel_context.get_value_satoshis()))
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
+               convert_chan_phase_err!($self, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
+       };
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
+               convert_chan_phase_err!($self, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
+       };
+       ($self: ident, $err: expr, $channel_phase: expr, $channel_id: expr) => {
+               match $channel_phase {
+                       ChannelPhase::Funded(channel) => {
+                               convert_chan_phase_err!($self, $err, channel, $channel_id, FUNDED_CHANNEL)
+                       },
+                       ChannelPhase::UnfundedOutboundV1(channel) => {
+                               convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+                       },
+                       ChannelPhase::UnfundedInboundV1(channel) => {
+                               convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
                }
-       }
+       };
 }
 
-macro_rules! break_chan_entry {
+macro_rules! break_chan_phase_entry {
        ($self: ident, $res: expr, $entry: expr) => {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
+                               let key = *$entry.key();
+                               let (drop, res) = convert_chan_phase_err!($self, e, $entry.get_mut(), &key);
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1854,27 +1901,13 @@ macro_rules! break_chan_entry {
        }
 }
 
-macro_rules! try_v1_outbound_chan_entry {
-       ($self: ident, $res: expr, $entry: expr) => {
-               match $res {
-                       Ok(res) => res,
-                       Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
-                               if drop {
-                                       $entry.remove_entry();
-                               }
-                               return Err(res);
-                       }
-               }
-       }
-}
-
-macro_rules! try_chan_entry {
+macro_rules! try_chan_phase_entry {
        ($self: ident, $res: expr, $entry: expr) => {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
+                               let key = *$entry.key();
+                               let (drop, res) = convert_chan_phase_err!($self, e, $entry.get_mut(), &key);
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1884,11 +1917,11 @@ macro_rules! try_chan_entry {
        }
 }
 
-macro_rules! remove_channel {
+macro_rules! remove_channel_phase {
        ($self: expr, $entry: expr) => {
                {
                        let channel = $entry.remove_entry().1;
-                       update_maps_on_chan_removal!($self, &channel.context);
+                       update_maps_on_chan_removal!($self, &channel.context());
                        channel
                }
        }
@@ -2002,12 +2035,12 @@ macro_rules! handle_new_monitor_update {
                match $update_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
-                                       log_bytes!($chan.context.channel_id()[..]));
+                                       &$chan.context.channel_id());
                                Ok(false)
                        },
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
-                                       log_bytes!($chan.context.channel_id()[..]));
+                                       &$chan.context.channel_id());
                                update_maps_on_chan_removal!($self, &$chan.context);
                                let res = Err(MsgHandleErrInternal::from_finish_shutdown(
                                        "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
@@ -2028,7 +2061,20 @@ macro_rules! handle_new_monitor_update {
                        handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
        };
        ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
-               handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
+               if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
+                       handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
+                               $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
+               } else {
+                       // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+                       // update).
+                       debug_assert!(false);
+                       let channel_id = *$chan_entry.key();
+                       let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
+                               "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
+                               $chan_entry.get_mut(), &channel_id);
+                       $chan_entry.remove();
+                       Err(err)
+               }
        };
        ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
                let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
@@ -2052,7 +2098,20 @@ macro_rules! handle_new_monitor_update {
                        })
        } };
        ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
-               handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+               if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
+                       handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
+                               $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
+               } else {
+                       // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+                       // update).
+                       debug_assert!(false);
+                       let channel_id = *$chan_entry.key();
+                       let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
+                               "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
+                               $chan_entry.get_mut(), &channel_id);
+                       $chan_entry.remove();
+                       Err(err)
+               }
        }
 }
 
@@ -2064,7 +2123,7 @@ macro_rules! process_events_body {
                                return;
                        }
 
-                       let mut result = NotifyOption::SkipPersist;
+                       let mut result;
 
                        {
                                // We'll acquire our total consistency lock so that we can be sure no other
@@ -2073,7 +2132,7 @@ macro_rules! process_events_body {
 
                                // Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
                                // ensure any startup-generated background events are handled first.
-                               if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+                               result = $self.process_background_events();
 
                                // TODO: This behavior should be documented. It's unintuitive that we query
                                // ChannelMonitors when clearing other events.
@@ -2113,8 +2172,14 @@ macro_rules! process_events_body {
                                processed_all_events = false;
                        }
 
-                       if result == NotifyOption::DoPersist {
-                               $self.persistence_notifier.notify();
+                       match result {
+                               NotifyOption::DoPersist => {
+                                       $self.needs_persist_flag.store(true, Ordering::Release);
+                                       $self.event_persist_notifier.notify();
+                               },
+                               NotifyOption::SkipPersistHandleEvents =>
+                                       $self.event_persist_notifier.notify(),
+                               NotifyOption::SkipPersistNoEvents => {},
                        }
                }
        }
@@ -2193,7 +2258,9 @@ where
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
                        background_events_processed_since_startup: AtomicBool::new(false),
-                       persistence_notifier: Notifier::new(),
+
+                       event_persist_notifier: Notifier::new(),
+                       needs_persist_flag: AtomicBool::new(false),
 
                        entropy_source,
                        node_signer,
@@ -2256,7 +2323,7 @@ where
        /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
        /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
        /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
-       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
+       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
                if channel_value_satoshis < 1000 {
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
@@ -2289,7 +2356,7 @@ where
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
                let temporary_channel_id = channel.context.channel_id();
-               match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
+               match peer_state.channel_by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(fuzzing) {
                                        return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -2297,7 +2364,7 @@ where
                                        panic!("RNG is bad???");
                                }
                        },
-                       hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+                       hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
                }
 
                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
@@ -2307,7 +2374,7 @@ where
                Ok(temporary_channel_id)
        }
 
-       fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+       fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
                // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
@@ -2321,12 +2388,18 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               // Only `Channels` in the channel_by_id map can be considered funded.
-                               for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone(), &self.fee_estimator);
-                                       res.push(details);
-                               }
+                               res.extend(peer_state.channel_by_id.iter()
+                                       .filter_map(|(chan_id, phase)| match phase {
+                                               // Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
+                                               ChannelPhase::Funded(chan) => Some((chan_id, chan)),
+                                               _ => None,
+                                       })
+                                       .filter(f)
+                                       .map(|(_channel_id, channel)| {
+                                               ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                                       peer_state.latest_features.clone(), &self.fee_estimator)
+                                       })
+                               );
                        }
                }
                res
@@ -2348,18 +2421,8 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               for (_channel_id, channel) in peer_state.channel_by_id.iter() {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone(), &self.fee_estimator);
-                                       res.push(details);
-                               }
-                               for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone(), &self.fee_estimator);
-                                       res.push(details);
-                               }
-                               for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                               for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
+                                       let details = ChannelDetails::from_channel_context(context, best_block_height,
                                                peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
@@ -2390,15 +2453,13 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        let features = &peer_state.latest_features;
-                       let chan_context_to_details = |context| {
+                       let context_to_details = |context| {
                                ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
                        };
                        return peer_state.channel_by_id
                                .iter()
-                               .map(|(_, channel)| &channel.context)
-                               .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
-                               .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
-                               .map(chan_context_to_details)
+                               .map(|(_, phase)| phase.context())
+                               .map(context_to_details)
                                .collect();
                }
                vec![]
@@ -2414,18 +2475,26 @@ where
        /// [`Event::PaymentSent`]: events::Event::PaymentSent
        pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
                self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
-                       .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment {
+                       .filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
+                               PendingOutboundPayment::AwaitingInvoice { .. } => {
+                                       Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
+                               },
+                               // InvoiceReceived is an intermediate state and doesn't need to be exposed
+                               PendingOutboundPayment::InvoiceReceived { .. } => {
+                                       Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
+                               },
                                PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
                                        Some(RecentPaymentDetails::Pending {
+                                               payment_id: *payment_id,
                                                payment_hash: *payment_hash,
                                                total_msat: *total_msat,
                                        })
                                },
                                PendingOutboundPayment::Abandoned { payment_hash, .. } => {
-                                       Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash })
+                                       Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
                                },
                                PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
-                                       Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash })
+                                       Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
                                },
                                PendingOutboundPayment::Legacy { .. } => None
                        })
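
A minimal sketch (not part of this patch) of consuming the extended listing: every `RecentPaymentDetails` variant now carries its `payment_id`, so a caller can, for example, abandon payments that are still waiting on an invoice. `channel_manager` is a hypothetical `ChannelManager` reference.

for details in channel_manager.list_recent_payments() {
        if let RecentPaymentDetails::AwaitingInvoice { payment_id } = details {
                channel_manager.abandon_payment(payment_id);
        }
}
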
@@ -2452,7 +2521,7 @@ where
                }, None));
        }
 
-       fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+       fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
@@ -2467,37 +2536,40 @@ where
                                let peer_state = &mut *peer_state_lock;
 
                                match peer_state.channel_by_id.entry(channel_id.clone()) {
-                                       hash_map::Entry::Occupied(mut chan_entry) => {
-                                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                                               let their_features = &peer_state.latest_features;
-                                               let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
-                                                       .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
-                                               failed_htlcs = htlcs;
-
-                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                               // here as we don't need the monitor update to complete until we send a
-                                               // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: *counterparty_node_id,
-                                                       msg: shutdown_msg,
-                                               });
+                                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                                       let funding_txo_opt = chan.context.get_funding_txo();
+                                                       let their_features = &peer_state.latest_features;
+                                                       let (shutdown_msg, mut monitor_update_opt, htlcs) =
+                                                               chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+                                                       failed_htlcs = htlcs;
+
+                                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                                       // here as we don't need the monitor update to complete until we send a
+                                                       // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: *counterparty_node_id,
+                                                               msg: shutdown_msg,
+                                                       });
 
-                                               // Update the monitor with the shutdown script if necessary.
-                                               if let Some(monitor_update) = monitor_update_opt.take() {
-                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
-                                               }
+                                                       // Update the monitor with the shutdown script if necessary.
+                                                       if let Some(monitor_update) = monitor_update_opt.take() {
+                                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                                       peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+                                                       }
 
-                                               if chan_entry.get().is_shutdown() {
-                                                       let channel = remove_channel!(self, chan_entry);
-                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: channel_update
-                                                               });
+                                                       if chan.is_shutdown() {
+                                                               if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
+                                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       msg: channel_update
+                                                                               });
+                                                                       }
+                                                                       self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+                                                               }
                                                        }
-                                                       self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+                                                       break Ok(());
                                                }
-                                               break Ok(());
                                        },
                                        hash_map::Entry::Vacant(_) => (),
                                }
@@ -2507,8 +2579,6 @@ where
                        //
                        // An appropriate error will be returned for non-existence of the channel if that's the case.
                        return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
-                       // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
-                       // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
                };
 
                for htlc_source in failed_htlcs.drain(..) {
@@ -2544,7 +2614,7 @@ where
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
        /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
-       pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+       pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
                self.close_channel_internal(channel_id, counterparty_node_id, None, None)
        }
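
A minimal sketch (not part of this patch): with the public API keyed by `ChannelId` rather than `[u8; 32]`, the id taken from `ChannelDetails` passes straight through to `close_channel`. `channel_manager` is a hypothetical `ChannelManager` reference.

for details in channel_manager.list_channels() {
        let _ = channel_manager.close_channel(&details.channel_id, &details.counterparty.node_id);
}
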
 
@@ -2578,7 +2648,7 @@ where
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
        /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
-       pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+       pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
 
@@ -2603,7 +2673,7 @@ where
 
        /// `peer_msg` should be set when we receive a message from a peer, but not set when the
        /// user closes, which will be re-exposed as the `ChannelClosed` reason.
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+       fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
        -> Result<PublicKey, APIError> {
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(peer_node_id)
@@ -2616,34 +2686,29 @@ where
                        } else {
                                ClosureReason::HolderForceClosed
                        };
-                       if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-                               self.issue_channel_close_events(&chan.get().context, closure_reason);
-                               let mut chan = remove_channel!(self, chan);
-                               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
-                               (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
-                       } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-                               self.issue_channel_close_events(&chan.get().context, closure_reason);
-                               let mut chan = remove_channel!(self, chan);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Unfunded channel has no update
-                               (None, chan.context.get_counterparty_node_id())
-                       } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-                               self.issue_channel_close_events(&chan.get().context, closure_reason);
-                               let mut chan = remove_channel!(self, chan);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Unfunded channel has no update
-                               (None, chan.context.get_counterparty_node_id())
+                       if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
+                               log_error!(self.logger, "Force-closing channel {}", channel_id);
+                               self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
+                               let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+                               match chan_phase {
+                                       ChannelPhase::Funded(mut chan) => {
+                                               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+                                               (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
+                                       },
+                                       ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
+                                               self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false));
+                                               // Unfunded channel has no update
+                                               (None, chan_phase.context().get_counterparty_node_id())
+                                       },
+                               }
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
                                // N.B. that we don't send any channel close event here: we
                                // don't have a user_channel_id, and we never sent any opening
                                // events anyway.
                                (None, *peer_node_id)
                        } else {
-                               return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
+                               return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
                        }
                };
                if let Some(update) = update_opt {
@@ -2656,7 +2721,7 @@ where
                Ok(counterparty_node_id)
        }
 
-       fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+       fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
@@ -2682,7 +2747,7 @@ where
        /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
        /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
        /// channel.
-       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, true)
        }
@@ -2693,7 +2758,7 @@ where
        ///
        /// You can always get the latest local transaction(s) to broadcast from
        /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
-       pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, false)
        }
@@ -2730,7 +2795,7 @@ where
                let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
                        msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
                                (short_channel_id, amt_to_forward, outgoing_cltv_value),
-                       msgs::InboundOnionPayload::Receive { .. } =>
+                       msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
                                return Err(InboundOnionErr {
                                        msg: "Final Node OnionHopData provided for us as an intermediary node",
                                        err_code: 0x4000 | 22,
@@ -2762,12 +2827,19 @@ where
                                payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
                        } =>
                                (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
-                       _ =>
+                       msgs::InboundOnionPayload::BlindedReceive {
+                               amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
+                       } => {
+                               let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
+                               (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
+                       }
+                       msgs::InboundOnionPayload::Forward { .. } => {
                                return Err(InboundOnionErr {
                                        err_code: 0x4000|22,
                                        err_data: Vec::new(),
                                        msg: "Got non final data with an HMAC of 0",
-                               }),
+                               })
+                       },
                };
                // final_incorrect_cltv_expiry
                if outgoing_cltv_value > cltv_expiry {
@@ -2907,7 +2979,10 @@ where
                        }
                }
 
-               let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+               let next_hop = match onion_utils::decode_next_payment_hop(
+                       shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
+                       msg.payment_hash, &self.node_signer
+               ) {
                        Ok(res) => res,
                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                return_malformed_err!(err_msg, err_code);
@@ -2929,7 +3004,9 @@ where
                        // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
                        // inbound channel's state.
                        onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
-                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } => {
+                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
+                               onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
+                       {
                                return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
                        }
                };
@@ -2961,7 +3038,9 @@ where
                                }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
+                                       |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+                               ).flatten() {
                                        None => {
                                                // Channel was removed. The short_to_chan_info and channel_by_id maps
                                                // have no consistency guarantees.
@@ -3133,7 +3212,7 @@ where
                if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
                self.get_channel_update_for_unicast(chan)
        }
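The log_bytes! wrappers dropped in this and the following hunks become plain {} formatting because the new ChannelId type (added under lightning/src/ln/channel_id.rs in this change set) can be displayed directly. A rough, self-contained stand-in defined locally rather than importing the real type, just to show the effect:

use std::fmt;

// Stand-in for the new ChannelId newtype; the real definition may differ in
// derives and helper constructors.
struct ChannelId([u8; 32]);

impl fmt::Display for ChannelId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Hex-encode the 32-byte id, matching how channel ids are logged.
        for byte in self.0.iter() {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}

fn main() {
    let chan_id = ChannelId([0x2a; 32]);
    // `{}` now works where log_bytes!(..) used to be required for a raw [u8; 32] id.
    println!("Attempting to generate broadcast channel update for channel {}", chan_id);
}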
 
@@ -3149,7 +3228,7 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", &chan.context.channel_id());
                let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -3159,7 +3238,7 @@ where
        }
 
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               log_trace!(self.logger, "Generating channel update for channel {}", &chan.context.channel_id());
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
                let enabled = chan.context.is_usable() && match chan.channel_update_status() {
@@ -3234,36 +3313,41 @@ where
                                .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
-                               if !chan.get().context.is_live() {
-                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
-                               }
-                               let funding_txo = chan.get().context.get_funding_txo().unwrap();
-                               let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
-                                       htlc_cltv, HTLCSource::OutboundRoute {
-                                               path: path.clone(),
-                                               session_priv: session_priv.clone(),
-                                               first_hop_htlc_msat: htlc_msat,
-                                               payment_id,
-                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
-                               match break_chan_entry!(self, send_res, chan) {
-                                       Some(monitor_update) => {
-                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
-                                                       Err(e) => break Err(e),
-                                                       Ok(false) => {
-                                                               // Note that MonitorUpdateInProgress here indicates (per function
-                                                               // docs) that we will resend the commitment update once monitor
-                                                               // updating completes. Therefore, we must return an error
-                                                               // indicating that it is unsafe to retry the payment wholesale,
-                                                               // which we do in the send_payment check for
-                                                               // MonitorUpdateInProgress, below.
-                                                               return Err(APIError::MonitorUpdateInProgress);
+                       if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
+                               match chan_phase_entry.get_mut() {
+                                       ChannelPhase::Funded(chan) => {
+                                               if !chan.context.is_live() {
+                                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
+                                               }
+                                               let funding_txo = chan.context.get_funding_txo().unwrap();
+                                               let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+                                                       htlc_cltv, HTLCSource::OutboundRoute {
+                                                               path: path.clone(),
+                                                               session_priv: session_priv.clone(),
+                                                               first_hop_htlc_msat: htlc_msat,
+                                                               payment_id,
+                                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
+                                               match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
+                                                       Some(monitor_update) => {
+                                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
+                                                                       Err(e) => break Err(e),
+                                                                       Ok(false) => {
+                                                                               // Note that MonitorUpdateInProgress here indicates (per function
+                                                                               // docs) that we will resend the commitment update once monitor
+                                                                               // updating completes. Therefore, we must return an error
+                                                                               // indicating that it is unsafe to retry the payment wholesale,
+                                                                               // which we do in the send_payment check for
+                                                                               // MonitorUpdateInProgress, below.
+                                                                               return Err(APIError::MonitorUpdateInProgress);
+                                                                       },
+                                                                       Ok(true) => {},
+                                                               }
                                                        },
-                                                       Ok(true) => {},
+                                                       None => {},
                                                }
                                        },
-                                       None => { },
-                               }
+                                       _ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
+                               };
                        } else {
                                // The channel was likely removed after we fetched the id from the
                                // `short_to_chan_info` map, but before we successfully locked the
@@ -3376,10 +3460,12 @@ where
        }
 
 
-       /// Signals that no further retries for the given payment should occur. Useful if you have a
+       /// Signals that no further attempts for the given payment should occur. Useful if you have a
        /// pending outbound payment with retries remaining, but wish to stop retrying the payment before
        /// retries are exhausted.
        ///
+       /// # Event Generation
+       ///
        /// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
        /// as there are no remaining pending HTLCs for this payment.
        ///
@@ -3387,11 +3473,19 @@ where
        /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
        /// determine the ultimate status of a payment.
        ///
-       /// If an [`Event::PaymentFailed`] event is generated and we restart without this
-       /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
+       /// # Requested Invoices
        ///
-       /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
-       /// [`Event::PaymentSent`]: events::Event::PaymentSent
+       /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the
+       /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying
+       /// it once received. The other events may only be generated once the invoice has been received.
+       ///
+       /// # Restart Behavior
+       ///
+       /// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
+       /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated; likewise for
+       /// [`Event::InvoiceRequestFailed`].
+       ///
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        pub fn abandon_payment(&self, payment_id: PaymentId) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
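To illustrate the documented behavior from the caller's side: after abandon_payment the terminal outcome still arrives as an event, and a BOLT 12 payment abandoned before its invoice arrives surfaces as InvoiceRequestFailed rather than PaymentFailed. A hedged sketch against the lightning crate; event fields are matched with `..` so only the variant names and payment_id are assumed:

use lightning::events::Event;
use lightning::ln::channelmanager::PaymentId;

// Sort already-processed events into the two terminal "abandoned" outcomes
// described in the doc comment above.
fn collect_abandon_outcomes(events: Vec<Event>) -> (Vec<PaymentId>, Vec<PaymentId>) {
    let mut payment_failed = Vec::new();
    let mut invoice_request_failed = Vec::new();
    for event in events {
        match event {
            // Generated once no HTLCs remain pending for the abandoned payment.
            Event::PaymentFailed { payment_id, .. } => payment_failed.push(payment_id),
            // Generated when a BOLT 12 payment is abandoned before its invoice arrives.
            Event::InvoiceRequestFailed { payment_id, .. } => invoice_request_failed.push(payment_id),
            _ => {},
        }
    }
    (payment_failed, invoice_request_failed)
}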
@@ -3455,7 +3549,7 @@ where
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
        fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
-               &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+               &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
        ) -> Result<(), APIError> {
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -3463,8 +3557,8 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) {
-                       Some(chan) => {
+               let (chan, msg) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+                       Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
                                let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
@@ -3488,13 +3582,18 @@ where
                                        },
                                }
                        },
-                       None => {
-                               return Err(APIError::ChannelUnavailable {
+                       Some(phase) => {
+                               peer_state.channel_by_id.insert(*temporary_channel_id, phase);
+                               return Err(APIError::APIMisuseError {
                                        err: format!(
-                                               "Channel with id {} not found for the passed counterparty node_id {}",
-                                               log_bytes!(*temporary_channel_id), counterparty_node_id),
+                                               "Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
+                                               temporary_channel_id, counterparty_node_id),
                                })
                        },
+                       None => return Err(APIError::ChannelUnavailable {err: format!(
+                               "Channel with id {} not found for the passed counterparty node_id {}",
+                               temporary_channel_id, counterparty_node_id),
+                               }),
                };
 
                peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
@@ -3510,14 +3609,14 @@ where
                                if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
-                               e.insert(chan);
+                               e.insert(ChannelPhase::Funded(chan));
                        }
                }
                Ok(())
        }
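Much of the churn in this file comes from collapsing the separate per-peer channel maps into a single channel_by_id map holding a ChannelPhase. A minimal, self-contained model of that shape; the types are stand-ins, and the UnfundedInboundV1 variant is assumed by analogy with the UnfundedOutboundV1 arm used above:

// Illustrative stand-ins for the real Channel/OutboundV1Channel/ChannelContext types.
struct ChannelContext { channel_id: [u8; 32] }
struct OutboundV1Channel { context: ChannelContext }
struct InboundV1Channel { context: ChannelContext }
struct FundedChannel { context: ChannelContext }

enum ChannelPhase {
    UnfundedOutboundV1(OutboundV1Channel),
    UnfundedInboundV1(InboundV1Channel),
    Funded(FundedChannel),
}

impl ChannelPhase {
    // A shared accessor keeps phase-agnostic code (e.g. config updates) working,
    // while phase-specific paths match on the variant as the diff does.
    fn context(&self) -> &ChannelContext {
        match self {
            ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
            ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
            ChannelPhase::Funded(chan) => &chan.context,
        }
    }
}

fn main() {
    let phase = ChannelPhase::Funded(FundedChannel { context: ChannelContext { channel_id: [0u8; 32] } });
    assert_eq!(phase.context().channel_id, [0u8; 32]);
}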
 
        #[cfg(test)]
-       pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+       pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
                self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
                        Ok(OutPoint { txid: tx.txid(), index: output_index })
                })
@@ -3553,14 +3652,16 @@ where
        ///
        /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
        /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
-       pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
+       pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-               for inp in funding_transaction.input.iter() {
-                       if inp.witness.is_empty() {
-                               return Err(APIError::APIMisuseError {
-                                       err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
-                               });
+               if !funding_transaction.is_coin_base() {
+                       for inp in funding_transaction.input.iter() {
+                               if inp.witness.is_empty() {
+                                       return Err(APIError::APIMisuseError {
+                                               err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
+                                       });
+                               }
                        }
                }
                {
@@ -3626,7 +3727,7 @@ where
        /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
        /// [`APIMisuseError`]: APIError::APIMisuseError
        pub fn update_partial_channel_config(
-               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate,
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
        ) -> Result<(), APIError> {
                if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
                        return Err(APIError::APIMisuseError {
@@ -3643,46 +3744,37 @@ where
                for channel_id in channel_ids {
                        if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
-                                       err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
+                                       err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id),
                                });
                        };
                }
                for channel_id in channel_ids {
-                       if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
-                               let mut config = channel.context.config();
+                       if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
+                               let mut config = channel_phase.context().config();
                                config.apply(config_update);
-                               if !channel.context.update_config(&config) {
+                               if !channel_phase.context_mut().update_config(&config) {
                                        continue;
                                }
-                               if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-                               } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                               node_id: channel.context.get_counterparty_node_id(),
-                                               msg,
-                                       });
+                               if let ChannelPhase::Funded(channel) = channel_phase {
+                                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: channel.context.get_counterparty_node_id(),
+                                                       msg,
+                                               });
+                                       }
                                }
                                continue;
-                       }
-
-                       let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
-                               &mut channel.context
-                       } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
-                               &mut channel.context
                        } else {
                                // This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
                                debug_assert!(false);
                                return Err(APIError::ChannelUnavailable {
                                        err: format!(
                                                "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
-                                               log_bytes!(*channel_id), counterparty_node_id),
+                                               channel_id, counterparty_node_id),
                                });
                        };
-                       let mut config = context.config();
-                       config.apply(config_update);
-                       // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`
-                       // which would be the case for pending inbound/outbound channels.
-                       context.update_config(&config);
                }
                Ok(())
        }
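A hedged usage fragment for the ChannelId-based signature above. It assumes an initialized channel_manager, the peer's counterparty_node_id, and a channel_ids slice are already in scope, and it leans on the From<ChannelConfig> for ChannelConfigUpdate conversion that update_channel_config itself uses below; names and paths outside this diff are assumptions, and the snippet is a fragment rather than a complete program:

use lightning::util::config::{ChannelConfig, ChannelConfigUpdate};

// Assumed bindings: channel_manager, counterparty_node_id, channel_ids.
let mut base = ChannelConfig::default();
base.forwarding_fee_base_msat = 1_000;
// Seed a partial update from a full config, mirroring update_channel_config below.
let update: ChannelConfigUpdate = base.into();
channel_manager.update_partial_channel_config(&counterparty_node_id, &channel_ids, &update)?;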
@@ -3710,7 +3802,7 @@ where
        /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
        /// [`APIMisuseError`]: APIError::APIMisuseError
        pub fn update_channel_config(
-               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
        ) -> Result<(), APIError> {
                return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
        }
@@ -3740,7 +3832,7 @@ where
        /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
        // TODO: when we move to deciding the best outbound channel at forward time, only take
        // `next_node_id` and not `next_hop_channel_id`
-       pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
+       pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let next_hop_scid = {
@@ -3750,17 +3842,21 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.get(next_hop_channel_id) {
-                               Some(chan) => {
+                               Some(ChannelPhase::Funded(chan)) => {
                                        if !chan.context.is_usable() {
                                                return Err(APIError::ChannelUnavailable {
-                                                       err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+                                                       err: format!("Channel with id {} not fully established", next_hop_channel_id)
                                                })
                                        }
                                        chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
                                },
+                               Some(_) => return Err(APIError::ChannelUnavailable {
+                                       err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
+                                               next_hop_channel_id, next_node_id)
+                               }),
                                None => return Err(APIError::ChannelUnavailable {
-                                       err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
-                                               log_bytes!(*next_hop_channel_id), next_node_id)
+                                       err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
+                                               next_hop_channel_id, next_node_id)
                                })
                        }
                };
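A hedged caller-side fragment for the stricter error handling above, assumed to run inside a helper returning Result<(), APIError> with an initialized channel_manager, a caller-chosen outbound_channel_id and next_node_id, and the pending events already collected; the expected_outbound_amount_msat field name is taken from the doc link above, everything else is an assumption:

use lightning::events::Event;

// Assumed binding: pending_events: Vec<Event>, collected via the event handler.
for event in pending_events {
    if let Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } = event {
        // With this change, passing the id of a channel that is still opening returns
        // APIError::ChannelUnavailable rather than looking like a missing channel.
        channel_manager.forward_intercepted_htlc(
            intercept_id, &outbound_channel_id, next_node_id, expected_outbound_amount_msat,
        )?;
    }
}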
@@ -3897,7 +3993,10 @@ where
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
                                                                                        if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
-                                                                                               let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+                                                                                               let next_hop = match onion_utils::decode_next_payment_hop(
+                                                                                                       phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
+                                                                                                       payment_hash, &self.node_signer
+                                                                                               ) {
                                                                                                        Ok(res) => res,
                                                                                                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                                                                                let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
@@ -3955,71 +4054,68 @@ where
                                        }
                                        let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
-                                       match peer_state.channel_by_id.entry(forward_chan_id) {
-                                               hash_map::Entry::Vacant(_) => {
-                                                       forwarding_channel_not_found!();
-                                                       continue;
-                                               },
-                                               hash_map::Entry::Occupied(mut chan) => {
-                                                       for forward_info in pending_forwards.drain(..) {
-                                                               match forward_info {
-                                                                       HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                               prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                                               forward_info: PendingHTLCInfo {
-                                                                                       incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
-                                                                                       routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
-                                                                               },
-                                                                       }) => {
-                                                                               log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
-                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                                       short_channel_id: prev_short_channel_id,
-                                                                                       user_channel_id: Some(prev_user_channel_id),
-                                                                                       outpoint: prev_funding_outpoint,
-                                                                                       htlc_id: prev_htlc_id,
-                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
-                                                                                       // Phantom payments are only PendingHTLCRouting::Receive.
-                                                                                       phantom_shared_secret: None,
-                                                                               });
-                                                                               if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
-                                                                                       payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                                       onion_packet, skimmed_fee_msat, &self.fee_estimator,
-                                                                                       &self.logger)
-                                                                               {
-                                                                                       if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
-                                                                                       } else {
-                                                                                               panic!("Stated return value requirements in send_htlc() were not met");
-                                                                                       }
-                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
-                                                                                       failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::reason(failure_code, data),
-                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
-                                                                                       ));
-                                                                                       continue;
-                                                                               }
-                                                                       },
-                                                                       HTLCForwardInfo::AddHTLC { .. } => {
-                                                                               panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+                                       if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                               for forward_info in pending_forwards.drain(..) {
+                                                       match forward_info {
+                                                               HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+                                                                       prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+                                                                       forward_info: PendingHTLCInfo {
+                                                                               incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+                                                                               routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
                                                                        },
-                                                                       HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                               log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                               if let Err(e) = chan.get_mut().queue_fail_htlc(
-                                                                                       htlc_id, err_packet, &self.logger
-                                                                               ) {
-                                                                                       if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                                       } else {
-                                                                                               panic!("Stated return value requirements in queue_fail_htlc() were not met");
-                                                                                       }
-                                                                                       // fail-backs are best-effort, we probably already have one
-                                                                                       // pending, and if not that's OK, if not, the channel is on
-                                                                                       // the chain and sending the HTLC-Timeout is their problem.
-                                                                                       continue;
+                                                               }) => {
+                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
+                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                               short_channel_id: prev_short_channel_id,
+                                                                               user_channel_id: Some(prev_user_channel_id),
+                                                                               outpoint: prev_funding_outpoint,
+                                                                               htlc_id: prev_htlc_id,
+                                                                               incoming_packet_shared_secret: incoming_shared_secret,
+                                                                               // Phantom payments are only PendingHTLCRouting::Receive.
+                                                                               phantom_shared_secret: None,
+                                                                       });
+                                                                       if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
+                                                                               payment_hash, outgoing_cltv_value, htlc_source.clone(),
+                                                                               onion_packet, skimmed_fee_msat, &self.fee_estimator,
+                                                                               &self.logger)
+                                                                       {
+                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                       log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+                                                                               } else {
+                                                                                       panic!("Stated return value requirements in send_htlc() were not met");
                                                                                }
-                                                                       },
-                                                               }
+                                                                               let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
+                                                                               failed_forwards.push((htlc_source, payment_hash,
+                                                                                       HTLCFailReason::reason(failure_code, data),
+                                                                                       HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
+                                                                               ));
+                                                                               continue;
+                                                                       }
+                                                               },
+                                                               HTLCForwardInfo::AddHTLC { .. } => {
+                                                                       panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+                                                               },
+                                                               HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       if let Err(e) = chan.queue_fail_htlc(
+                                                                               htlc_id, err_packet, &self.logger
+                                                                       ) {
+                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                               } else {
+                                                                                       panic!("Stated return value requirements in queue_fail_htlc() were not met");
+                                                                               }
+                                                                               // fail-backs are best-effort, we probably already have one
+                                                                               // pending, and if not that's OK; in that case the channel is on
+                                                                               // the chain and sending the HTLC-Timeout is their problem.
+                                                                               continue;
+                                                                       }
+                                                               },
                                                        }
                                                }
+                                       } else {
+                                               forwarding_channel_not_found!();
+                                               continue;
                                        }
                                } else {
                                        'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
@@ -4304,7 +4400,7 @@ where
                let mut background_events = Vec::new();
                mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
                if background_events.is_empty() {
-                       return NotifyOption::SkipPersist;
+                       return NotifyOption::SkipPersistNoEvents;
                }
 
                for event in background_events.drain(..) {
@@ -4322,10 +4418,10 @@ where
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
-                                                               hash_map::Entry::Occupied(mut chan) => {
+                                                               hash_map::Entry::Occupied(mut chan_phase) => {
                                                                        updated_chan = true;
                                                                        handle_new_monitor_update!(self, funding_txo, update.clone(),
-                                                                               peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
+                                                                               peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
                                                                },
                                                                hash_map::Entry::Vacant(_) => Ok(()),
                                                        }
@@ -4349,7 +4445,7 @@ where
                                        if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                let peer_state = &mut *peer_state_lock;
-                                               if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+                                               if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
                                                        handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
                                                } else {
                                                        let update_actions = peer_state.monitor_update_blocked_actions
@@ -4372,21 +4468,21 @@ where
                let _ = self.process_background_events();
        }
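SkipPersist is renamed to SkipPersistNoEvents here, presumably so the guard can distinguish "nothing happened" from "events to surface but no persistence needed". A toy, self-contained model of the optionally-notify decision; the real NotifyOption and PersistenceNotifierGuard carry more variants and notification plumbing:

#[derive(PartialEq)]
enum NotifyOption {
    DoPersist,
    SkipPersistNoEvents,
    // Other variants elided in this sketch.
}

// Run the closure and record whether the manager needs to be re-persisted.
fn optionally_notify<F: FnOnce() -> NotifyOption>(needs_persist: &mut bool, f: F) {
    if f() == NotifyOption::DoPersist {
        // In the real code this wakes the background processor to persist the manager.
        *needs_persist = true;
    }
}

fn main() {
    let mut needs_persist = false;
    optionally_notify(&mut needs_persist, || NotifyOption::SkipPersistNoEvents);
    assert!(!needs_persist);
    optionally_notify(&mut needs_persist, || NotifyOption::DoPersist);
    assert!(needs_persist);
}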
 
-       fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
-               if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
+       fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
+               if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-                               log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
-                       return NotifyOption::SkipPersist;
+                               chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                       return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
-                               log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
-                       return NotifyOption::SkipPersist;
+                               chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                       return NotifyOption::SkipPersistNoEvents;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
-                       log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                       &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
                chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
                NotifyOption::DoPersist
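The predicate above skips a fee update when the estimate fell by less than half (or is unchanged); only a drop to half or less, or any increase, queues update_fee. A self-contained restatement of just that check with a few concrete values:

// Mirrors the gate in update_channel_fee: skip when the feerate fell, but by
// less than half (or is unchanged); update otherwise.
fn qualifies_for_fee_update(current_sat_per_kw: u32, new_sat_per_kw: u32) -> bool {
    !(new_sat_per_kw <= current_sat_per_kw && new_sat_per_kw * 2 > current_sat_per_kw)
}

fn main() {
    assert!(!qualifies_for_fee_update(1_000, 1_000)); // unchanged: skip
    assert!(!qualifies_for_fee_update(1_000, 600));   // small drop: skip
    assert!(qualifies_for_fee_update(1_000, 500));    // halved: update
    assert!(qualifies_for_fee_update(1_000, 1_200));  // increase: update
}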
@@ -4398,8 +4494,8 @@ where
        /// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
        /// it wants to detect). Thus, we have a variant exposed here for its benefit.
        pub fn maybe_update_chan_fees(&self) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut should_persist = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       let mut should_persist = NotifyOption::SkipPersistNoEvents;
 
                        let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                        let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
@@ -4408,7 +4504,9 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+                               for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
+                                       |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
+                               ) {
                                        let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                                                min_mempool_feerate
                                        } else {
@@ -4441,8 +4539,8 @@ where
        /// [`ChannelUpdate`]: msgs::ChannelUpdate
        /// [`ChannelConfig`]: crate::util::config::ChannelConfig
        pub fn timer_tick_occurred(&self) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut should_persist = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       let mut should_persist = NotifyOption::SkipPersistNoEvents;
 
                        let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                        let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
@@ -4450,6 +4548,36 @@ where
                        let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        let mut pending_peers_awaiting_removal = Vec::new();
+
+                       let process_unfunded_channel_tick = |
+                               chan_id: &ChannelId,
+                               context: &mut ChannelContext<SP>,
+                               unfunded_context: &mut UnfundedChannelContext,
+                               pending_msg_events: &mut Vec<MessageSendEvent>,
+                               counterparty_node_id: PublicKey,
+                       | {
+                               context.maybe_expire_prev_config();
+                               if unfunded_context.should_expire_unfunded_channel() {
+                                       log_error!(self.logger,
+                                               "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
+                                       update_maps_on_chan_removal!(self, &context);
+                                       self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
+                                       self.finish_force_close_channel(context.force_shutdown(false));
+                                       pending_msg_events.push(MessageSendEvent::HandleError {
+                                               node_id: counterparty_node_id,
+                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                       msg: msgs::ErrorMessage {
+                                                               channel_id: *chan_id,
+                                                               data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
+                                                       },
+                                               },
+                                       });
+                                       false
+                               } else {
+                                       true
+                               }
+                       };
+
                        {
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
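The process_unfunded_channel_tick closure added in this hunk force-closes channels that never finish the funding handshake. A stand-alone sketch of the tick-counting idea behind should_expire_unfunded_channel; the constant and field names here are hypothetical, the real ones live in channel.rs:

// Hypothetical limit: the real constant may differ.
const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

// Stand-in for the real UnfundedChannelContext: counts timer ticks observed while
// the channel still has no funding transaction.
struct UnfundedChannelContext { unfunded_channel_age_ticks: usize }

impl UnfundedChannelContext {
    fn should_expire_unfunded_channel(&mut self) -> bool {
        self.unfunded_channel_age_ticks += 1;
        self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
    }
}

fn main() {
    let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
    let expired_on_tick = (1usize..).find(|_| ctx.should_expire_unfunded_channel()).unwrap();
    // The pending channel is dropped after the configured number of timer ticks.
    assert_eq!(expired_on_tick, UNFUNDED_CHANNEL_AGE_LIMIT_TICKS);
}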
@@ -4457,114 +4585,93 @@ where
                                        let peer_state = &mut *peer_state_lock;
                                        let pending_msg_events = &mut peer_state.pending_msg_events;
                                        let counterparty_node_id = *counterparty_node_id;
-                                       peer_state.channel_by_id.retain(|chan_id, chan| {
-                                               let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-                                                       min_mempool_feerate
-                                               } else {
-                                                       normal_feerate
-                                               };
-                                               let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
-                                               if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
-
-                                               if let Err(e) = chan.timer_check_closing_negotiation_progress() {
-                                                       let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
-                                                       handle_errors.push((Err(err), counterparty_node_id));
-                                                       if needs_close { return false; }
-                                               }
-
-                                               match chan.channel_update_status() {
-                                                       ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
-                                                       ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
-                                                       ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
-                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-                                                       ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
-                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-                                                       ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
-                                                               n += 1;
-                                                               if n >= DISABLE_GOSSIP_TICKS {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
-                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: update
-                                                                               });
-                                                                       }
-                                                                       should_persist = NotifyOption::DoPersist;
-                                                               } else {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
-                                                               }
-                                                       },
-                                                       ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
-                                                               n += 1;
-                                                               if n >= ENABLE_GOSSIP_TICKS {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
-                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: update
-                                                                               });
-                                                                       }
-                                                                       should_persist = NotifyOption::DoPersist;
+                                       peer_state.channel_by_id.retain(|chan_id, phase| {
+                                               match phase {
+                                                       ChannelPhase::Funded(chan) => {
+                                                               let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                                                                       min_mempool_feerate
                                                                } else {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
+                                                                       normal_feerate
+                                                               };
+                                                               let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
+                                                               if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+
+                                                               if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+                                                                       let (needs_close, err) = convert_chan_phase_err!(self, e, chan, chan_id, FUNDED_CHANNEL);
+                                                                       handle_errors.push((Err(err), counterparty_node_id));
+                                                                       if needs_close { return false; }
                                                                }
-                                                       },
-                                                       _ => {},
-                                               }
 
-                                               chan.context.maybe_expire_prev_config();
-
-                                               if chan.should_disconnect_peer_awaiting_response() {
-                                                       log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
-                                                                       counterparty_node_id, log_bytes!(*chan_id));
-                                                       pending_msg_events.push(MessageSendEvent::HandleError {
-                                                               node_id: counterparty_node_id,
-                                                               action: msgs::ErrorAction::DisconnectPeerWithWarning {
-                                                                       msg: msgs::WarningMessage {
-                                                                               channel_id: *chan_id,
-                                                                               data: "Disconnecting due to timeout awaiting response".to_owned(),
+                                                               match chan.channel_update_status() {
+                                                                       ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+                                                                       ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+                                                                       ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
+                                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+                                                                       ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
+                                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+                                                                       ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
+                                                                               n += 1;
+                                                                               if n >= DISABLE_GOSSIP_TICKS {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+                                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                                       msg: update
+                                                                                               });
+                                                                                       }
+                                                                                       should_persist = NotifyOption::DoPersist;
+                                                                               } else {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
+                                                                               }
                                                                        },
-                                                               },
-                                                       });
-                                               }
+                                                                       ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
+                                                                               n += 1;
+                                                                               if n >= ENABLE_GOSSIP_TICKS {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+                                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                                       msg: update
+                                                                                               });
+                                                                                       }
+                                                                                       should_persist = NotifyOption::DoPersist;
+                                                                               } else {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
+                                                                               }
+                                                                       },
+                                                                       _ => {},
+                                                               }
 
-                                               true
-                                       });
+                                                               chan.context.maybe_expire_prev_config();
+
+                                                               if chan.should_disconnect_peer_awaiting_response() {
+                                                                       log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+                                                                                       counterparty_node_id, chan_id);
+                                                                       pending_msg_events.push(MessageSendEvent::HandleError {
+                                                                               node_id: counterparty_node_id,
+                                                                               action: msgs::ErrorAction::DisconnectPeerWithWarning {
+                                                                                       msg: msgs::WarningMessage {
+                                                                                               channel_id: *chan_id,
+                                                                                               data: "Disconnecting due to timeout awaiting response".to_owned(),
+                                                                                       },
+                                                                               },
+                                                                       });
+                                                               }
 
-                                       let process_unfunded_channel_tick = |
-                                               chan_id: &[u8; 32],
-                                               chan_context: &mut ChannelContext<SP>,
-                                               unfunded_chan_context: &mut UnfundedChannelContext,
-                                               pending_msg_events: &mut Vec<MessageSendEvent>,
-                                       | {
-                                               chan_context.maybe_expire_prev_config();
-                                               if unfunded_chan_context.should_expire_unfunded_channel() {
-                                                       log_error!(self.logger,
-                                                               "Force-closing pending channel with ID {} for not establishing in a timely manner",
-                                                               log_bytes!(&chan_id[..]));
-                                                       update_maps_on_chan_removal!(self, &chan_context);
-                                                       self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
-                                                       self.finish_force_close_channel(chan_context.force_shutdown(false));
-                                                       pending_msg_events.push(MessageSendEvent::HandleError {
-                                                               node_id: counterparty_node_id,
-                                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                                       msg: msgs::ErrorMessage {
-                                                                               channel_id: *chan_id,
-                                                                               data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
-                                                                       },
-                                                               },
-                                                       });
-                                                       false
-                                               } else {
-                                                       true
+                                                               true
+                                                       },
+                                                       ChannelPhase::UnfundedInboundV1(chan) => {
+                                                               process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+                                                                       pending_msg_events, counterparty_node_id)
+                                                       },
+                                                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                               process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+                                                                       pending_msg_events, counterparty_node_id)
+                                                       },
                                                }
-                                       };
-                                       peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
-                                               chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
-                                       peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
-                                               chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
+                                       });
 
                                        for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
                                                if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
-                                                       log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", log_bytes!(&chan_id[..]));
+                                                       log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
                                                        peer_state.pending_msg_events.push(
                                                                events::MessageSendEvent::HandleError {
                                                                        node_id: counterparty_node_id,
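
The retain loop above is the core of this hunk: the separate per-phase channel maps collapse into the single `channel_by_id` map, whose values now carry the phase, and the timer walks every channel in one pass. A minimal, self-contained sketch of that dispatch pattern, using simplified stand-in types and tick logic that are illustrative only, not LDK's:

use std::collections::HashMap;

struct FundedChannel { feerate_per_kw: u32 }
struct UnfundedChannel { ticks_since_open: u32 }

enum ChannelPhase {
    Funded(FundedChannel),
    UnfundedInboundV1(UnfundedChannel),
    UnfundedOutboundV1(UnfundedChannel),
}

// Illustrative limit; the real constant lives in channelmanager.rs.
const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: u32 = 60;

fn timer_tick(channel_by_id: &mut HashMap<[u8; 32], ChannelPhase>, new_feerate: u32) {
    // One retain pass covers every phase; returning `false` drops the channel.
    channel_by_id.retain(|_chan_id, phase| match phase {
        ChannelPhase::Funded(chan) => {
            chan.feerate_per_kw = new_feerate;
            true
        },
        ChannelPhase::UnfundedInboundV1(chan) | ChannelPhase::UnfundedOutboundV1(chan) => {
            chan.ticks_since_open += 1;
            // Expire channels that never completed the funding handshake.
            chan.ticks_since_open < UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
        },
    });
}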
@@ -4648,7 +4755,7 @@ where
                                let _ = handle_error!(self, err, counterparty_node_id);
                        }
 
-                       self.pending_outbound_payments.remove_stale_resolved_payments(&self.pending_events);
+                       self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
 
                        // Technically we don't need to do this here, but if we have holding cell entries in a
                        // channel that need freeing, it's better to do that here and block a background task
@@ -4767,7 +4874,7 @@ where
        // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
        // be surfaced to the user.
        fn fail_holding_cell_htlcs(
-               &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
+               &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
                counterparty_node_id: &PublicKey
        ) {
                let (failure_code, onion_failure_data) = {
@@ -4776,8 +4883,14 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                match peer_state.channel_by_id.entry(channel_id) {
-                                       hash_map::Entry::Occupied(chan_entry) => {
-                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+                                       hash_map::Entry::Occupied(chan_phase_entry) => {
+                                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get() {
+                                                       self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan)
+                                               } else {
+                                                       // We shouldn't be trying to fail holding cell HTLCs on an unfunded channel.
+                                                       debug_assert!(false);
+                                                       (0x4000|10, Vec::new())
+                                               }
                                        },
                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                }
@@ -5036,36 +5149,38 @@ where
                        if peer_state_opt.is_some() {
                                let mut peer_state_lock = peer_state_opt.unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-                                       let counterparty_node_id = chan.get().context.get_counterparty_node_id();
-                                       let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
-                                       if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
-                                               if let Some(action) = completion_action(Some(htlc_value_msat)) {
-                                                       log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
-                                                               log_bytes!(chan_id), action);
-                                                       peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-                                               }
-                                               if !during_init {
-                                                       let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-                                                               peer_state, per_peer_state, chan);
-                                                       if let Err(e) = res {
-                                                               // TODO: This is a *critical* error - we probably updated the outbound edge
-                                                               // of the HTLC's monitor with a preimage. We should retry this monitor
-                                                               // update over and over again until morale improves.
-                                                               log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-                                                               return Err((counterparty_node_id, e));
+                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let counterparty_node_id = chan.context.get_counterparty_node_id();
+                                               let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+                                               if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+                                                       if let Some(action) = completion_action(Some(htlc_value_msat)) {
+                                                               log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                                       chan_id, action);
+                                                               peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+                                                       }
+                                                       if !during_init {
+                                                               let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+                                                                       peer_state, per_peer_state, chan_phase_entry);
+                                                               if let Err(e) = res {
+                                                                       // TODO: This is a *critical* error - we probably updated the outbound edge
+                                                                       // of the HTLC's monitor with a preimage. We should retry this monitor
+                                                                       // update over and over again until morale improves.
+                                                                       log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+                                                                       return Err((counterparty_node_id, e));
+                                                               }
+                                                       } else {
+                                                               // If we're running during init we cannot update a monitor directly -
+                                                               // they probably haven't actually been loaded yet. Instead, push the
+                                                               // monitor update as a background event.
+                                                               self.pending_background_events.lock().unwrap().push(
+                                                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                                               counterparty_node_id,
+                                                                               funding_txo: prev_hop.outpoint,
+                                                                               update: monitor_update.clone(),
+                                                                       });
                                                        }
-                                               } else {
-                                                       // If we're running during init we cannot update a monitor directly -
-                                                       // they probably haven't actually been loaded yet. Instead, push the
-                                                       // monitor update as a background event.
-                                                       self.pending_background_events.lock().unwrap().push(
-                                                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                                                       counterparty_node_id,
-                                                                       funding_txo: prev_hop.outpoint,
-                                                                       update: monitor_update.clone(),
-                                                               });
                                                }
                                        }
                                        return Ok(());
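
A sketch of the two paths the claim above can take once a monitor update is produced. The types and method names below (`MonitorUpdate`, `Background`, `apply_to_monitor`) are simplified placeholders rather than LDK's API, but the shape matches: apply the update immediately in normal operation, or queue it as a background event while monitors may not yet be loaded during startup.

struct MonitorUpdate { update_id: u64 }

enum Background {
    MonitorUpdateRegeneratedOnStartup { update: MonitorUpdate },
}

struct Manager {
    startup_complete: bool,
    pending_background_events: Vec<Background>,
}

impl Manager {
    fn handle_new_monitor_update(&mut self, update: MonitorUpdate) -> Result<(), String> {
        if self.startup_complete {
            // Normal operation: hand the update to the chain monitor immediately.
            self.apply_to_monitor(&update)
        } else {
            // During startup the monitors may not be loaded yet, so queue the
            // update and replay it later as a background event.
            self.pending_background_events
                .push(Background::MonitorUpdateRegeneratedOnStartup { update });
            Ok(())
        }
    }

    fn apply_to_monitor(&mut self, update: &MonitorUpdate) -> Result<(), String> {
        // Placeholder for the real persist-and-apply step.
        let _ = update.update_id;
        Ok(())
    }
}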
@@ -5120,11 +5235,17 @@ where
                self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
        }
 
-       fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
+       fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
+               forwarded_htlc_value_msat: Option<u64>, from_onchain: bool,
+               next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+       ) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
                                        "We don't support claim_htlc claims during startup - monitors may not be available yet");
+                               if let Some(pubkey) = next_channel_counterparty_node_id {
+                                       debug_assert_eq!(pubkey, path.hops[0].pubkey);
+                               }
                                let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                        channel_funding_outpoint: next_channel_outpoint,
                                        counterparty_node_id: path.hops[0].pubkey,
@@ -5135,6 +5256,7 @@ where
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
+                               let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
                                let res = self.claim_funds_from_hop(hop_data, payment_preimage,
                                        |htlc_claim_value_msat| {
                                                if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
@@ -5150,7 +5272,17 @@ where
                                                                        next_channel_id: Some(next_channel_outpoint.to_channel_id()),
                                                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                                                                },
-                                                               downstream_counterparty_and_funding_outpoint: None,
+                                                               downstream_counterparty_and_funding_outpoint:
+                                                                       if let Some(node_id) = next_channel_counterparty_node_id {
+                                                                               Some((node_id, next_channel_outpoint, completed_blocker))
+                                                                       } else {
+                                                                               // We can only get `None` here if we are processing a
+                                                                               // `ChannelMonitor`-originated event, in which case we
+                                                                               // don't care about ensuring we wake the downstream
+                                                                               // channel's monitor updating - the channel is already
+                                                                               // closed.
+                                                                               None
+                                                                       },
                                                        })
                                                } else { None }
                                        });
@@ -5210,7 +5342,7 @@ where
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
                log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
-                       log_bytes!(channel.context.channel_id()),
+                       &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
                        if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
@@ -5298,7 +5430,7 @@ where
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
-                       if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
@@ -5338,7 +5470,7 @@ where
        ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
-       pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
+       pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
                self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
        }
 
@@ -5360,11 +5492,11 @@ where
        ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
-       pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
+       pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
                self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
        }
 
-       fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+       fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
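
The signature changes in this and the surrounding hunks swap the bare `[u8; 32]` for the new `ChannelId` type introduced by this change. The sketch below shows the rough shape such a wrapper takes and why log call sites elsewhere in the diff can drop `log_bytes!`; the exact definition may differ, and the derivation shown follows BOLT 2's rule of XORing the funding output index into the last two bytes of the funding txid.

use core::fmt;

// A sketch of the wrapper type, not the exact definition.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChannelId(pub [u8; 32]);

impl ChannelId {
    // v1 channels derive their id from the funding txid and output index.
    pub fn v1_from_funding_txid(txid: &[u8; 32], output_index: u16) -> Self {
        let mut res = *txid;
        res[30] ^= (output_index >> 8) as u8;
        res[31] ^= (output_index & 0xff) as u8;
        ChannelId(res)
    }
}

// A Display impl is what lets log call sites format the id with a plain `{}`
// instead of wrapping it in `log_bytes!`.
impl fmt::Display for ChannelId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for byte in self.0.iter() {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}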
@@ -5428,7 +5560,7 @@ where
                        msg: channel.accept_inbound_channel(),
                });
 
-               peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel);
+               peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
 
                Ok(())
        }
@@ -5460,24 +5592,34 @@ where
                peer: &PeerState<SP>, best_block_height: u32
        ) -> usize {
                let mut num_unfunded_channels = 0;
-               for (_, chan) in peer.channel_by_id.iter() {
-                       // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
-                       // which have not yet had any confirmations on-chain.
-                       if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
-                               chan.context.get_funding_tx_confirmations(best_block_height) == 0
-                       {
-                               num_unfunded_channels += 1;
-                       }
-               }
-               for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
-                       if chan.context.minimum_depth().unwrap_or(1) != 0 {
-                               num_unfunded_channels += 1;
+               for (_, phase) in peer.channel_by_id.iter() {
+                       match phase {
+                               ChannelPhase::Funded(chan) => {
+                                       // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
+                                       // which have not yet had any confirmations on-chain.
+                                       if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+                                               chan.context.get_funding_tx_confirmations(best_block_height) == 0
+                                       {
+                                               num_unfunded_channels += 1;
+                                       }
+                               },
+                               ChannelPhase::UnfundedInboundV1(chan) => {
+                                       if chan.context.minimum_depth().unwrap_or(1) != 0 {
+                                               num_unfunded_channels += 1;
+                                       }
+                               },
+                               ChannelPhase::UnfundedOutboundV1(_) => {
+                                       // Outbound channels don't contribute to the unfunded count in the DoS context.
+                                       continue;
+                               }
                        }
                }
                num_unfunded_channels + peer.inbound_channel_request_by_id.len()
        }
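
For reference, a self-contained sketch of the counting rule implemented just above, with simplified stand-in types: only inbound channels that are still pre-funding, or funded but non-zero-conf with no confirmations yet, count against the per-peer unfunded limit, and inbound requests we have not yet accepted occupy a slot as well.

enum Phase {
    Funded { is_outbound: bool, minimum_depth: u32, funding_confs: u32 },
    UnfundedInboundV1 { minimum_depth: u32 },
    UnfundedOutboundV1,
}

fn unfunded_channel_count(channels: &[Phase], inbound_channel_requests: usize) -> usize {
    let mut num_unfunded_channels = 0;
    for phase in channels {
        match phase {
            // Inbound non-zero-conf channels we monitor but which have no
            // confirmations on-chain yet.
            Phase::Funded { is_outbound, minimum_depth, funding_confs } => {
                if !*is_outbound && *minimum_depth != 0 && *funding_confs == 0 {
                    num_unfunded_channels += 1;
                }
            },
            // Inbound channels still negotiating, unless accepted as 0-conf.
            Phase::UnfundedInboundV1 { minimum_depth } => {
                if *minimum_depth != 0 {
                    num_unfunded_channels += 1;
                }
            },
            // Outbound channels were opened by us and are not counted here.
            Phase::UnfundedOutboundV1 => {},
        }
    }
    // Inbound requests we have not even accepted yet also take up a slot.
    num_unfunded_channels + inbound_channel_requests
}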
 
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+               // likely to be lost on restart!
                if msg.chain_hash != self.genesis_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
@@ -5572,11 +5714,13 @@ where
                        node_id: counterparty_node_id.clone(),
                        msg: channel.accept_inbound_channel(),
                });
-               peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
+               peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
                Ok(())
        }
 
        fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+               // likely to be lost on restart!
                let (value, output_script, user_id) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -5586,10 +5730,17 @@ where
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
-                                       (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
+                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                               hash_map::Entry::Occupied(mut phase) => {
+                                       match phase.get_mut() {
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
+                                                       (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+                                               },
+                                               _ => {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                                               }
+                                       }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
@@ -5618,8 +5769,8 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let (chan, funding_msg, monitor) =
-                       match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
-                               Some(inbound_chan) => {
+                       match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
+                               Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
                                        match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
                                                Ok(res) => res,
                                                Err((mut inbound_chan, err)) => {
@@ -5634,6 +5785,9 @@ where
                                                },
                                        }
                                },
+                               Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                               },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        };
 
@@ -5665,22 +5819,25 @@ where
 
                                let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 
-                               let chan = e.insert(chan);
-                               let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
-                                       per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
-                                       { peer_state.channel_by_id.remove(&new_channel_id) });
-
-                               // Note that we reply with the new channel_id in error messages if we gave up on the
-                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                               // any messages referencing a previously-closed channel anyway.
-                               // We do not propagate the monitor update to the user as it would be for a monitor
-                               // that we didn't manage to store (and that we don't care about - we don't respond
-                               // with the funding_signed so the channel can never go on chain).
-                               if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
-                                       res.0 = None;
+                               if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
+                                       let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
+                                               per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
+                                               { peer_state.channel_by_id.remove(&new_channel_id) });
+
+                                       // Note that we reply with the new channel_id in error messages if we gave up on the
+                                       // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                                       // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                                       // any messages referencing a previously-closed channel anyway.
+                                       // We do not propagate the monitor update to the user as it would be for a monitor
+                                       // that we didn't manage to store (and that we don't care about - we don't respond
+                                       // with the funding_signed so the channel can never go on chain).
+                                       if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+                                               res.0 = None;
+                                       }
+                                       res.map(|_| ())
+                               } else {
+                                       unreachable!("This must be a funded channel as we just inserted it.");
                                }
-                               res.map(|_| ())
                        }
                }
        }
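
The `if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan))` step above looks odd but is forced by the map API: `VacantEntry::insert` returns a mutable reference to the value as stored in the map, i.e. the enum, not the variant that was just wrapped. A minimal sketch of the idiom with placeholder types:

use std::collections::HashMap;
use std::collections::hash_map::Entry;

struct FundedChannel { value_sat: u64 }

enum Phase {
    Funded(FundedChannel),
    Unfunded,
}

fn insert_funded(map: &mut HashMap<u32, Phase>, id: u32, value_sat: u64) {
    match map.entry(id) {
        Entry::Occupied(_) => { /* duplicate id; the real code errors out here */ },
        Entry::Vacant(e) => {
            // `insert` hands back `&mut Phase`, so we must re-match to reach the
            // channel we just wrapped; any other variant is unreachable.
            if let Phase::Funded(chan) = e.insert(Phase::Funded(FundedChannel { value_sat })) {
                // Operate on the funded channel in place.
                debug_assert_eq!(chan.value_sat, value_sat);
            } else {
                unreachable!("we just inserted the Funded variant");
            }
        },
    }
}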
@@ -5697,26 +5854,35 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               let monitor = try_chan_entry!(self,
-                                       chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
-                               let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-                               let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
-                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                       // monitor update contained within `shutdown_finish` was applied.
-                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                               shutdown_finish.0.take();
-                                       }
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               match chan_phase_entry.get_mut() {
+                                       ChannelPhase::Funded(ref mut chan) => {
+                                               let monitor = try_chan_phase_entry!(self,
+                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
+                                               let update_res = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor);
+                                               let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                       // monitor update contained within `shutdown_finish` was applied.
+                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                               shutdown_finish.0.take();
+                                                       }
+                                               }
+                                               res.map(|_| ())
+                                       },
+                                       _ => {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
+                                       },
                                }
-                               res.map(|_| ())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
        fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -5726,38 +5892,45 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
-                                       self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
-                               if let Some(announcement_sigs) = announcement_sigs_opt {
-                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg: announcement_sigs,
-                                       });
-                               } else if chan.get().context.is_usable() {
-                                       // If we're sending an announcement_signatures, we'll send the (public)
-                                       // channel_update after sending a channel_announcement when we receive our
-                                       // counterparty's announcement_signatures. Thus, we only bother to send a
-                                       // channel_update here if the channel is not public, i.e. we're not sending an
-                                       // announcement_signatures.
-                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
-                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
+                                               self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+                                       if let Some(announcement_sigs) = announcement_sigs_opt {
+                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                        node_id: counterparty_node_id.clone(),
-                                                       msg,
+                                                       msg: announcement_sigs,
                                                });
+                                       } else if chan.context.is_usable() {
+                                               // If we're sending an announcement_signatures, we'll send the (public)
+                                               // channel_update after sending a channel_announcement when we receive our
+                                               // counterparty's announcement_signatures. Thus, we only bother to send a
+                                               // channel_update here if the channel is not public, i.e. we're not sending an
+                                               // announcement_signatures.
+                                               log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
+                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg,
+                                                       });
+                                               }
                                        }
-                               }
-
-                               {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                       emit_channel_ready_event!(pending_events, chan.get_mut());
-                               }
 
-                               Ok(())
+                                       {
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               emit_channel_ready_event!(pending_events, chan);
+                                       }
+
+                                       Ok(())
+                               } else {
+                                       try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
+                               }
                        },
-                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                       hash_map::Entry::Vacant(_) => {
+                               Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                       }
                }
        }
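
The `channel_ready` handler above (like `funding_signed` before it) follows a common guard: a message that only makes sense after funding closes the channel if it is still unfunded. A compact sketch of that guard with placeholder types:

struct FundedChannel;

enum Phase {
    Funded(FundedChannel),
    Unfunded,
}

enum ChannelError {
    Close(String),
}

// Returns the funded channel, or an error that will close the channel if a
// post-funding message arrives while it is still unfunded.
fn require_funded<'a>(phase: &'a mut Phase, msg_name: &str) -> Result<&'a mut FundedChannel, ChannelError> {
    match phase {
        Phase::Funded(chan) => Ok(chan),
        Phase::Unfunded => Err(ChannelError::Close(
            format!("Got a {} message for an unfunded channel!", msg_name))),
    }
}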
 
@@ -5772,48 +5945,46 @@ where
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
-                       // https://github.com/lightningdevkit/rust-lightning/issues/2422
-                       if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
-                               self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
-                               let mut chan = remove_channel!(self, chan_entry);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               return Ok(());
-                       } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
-                               self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
-                               let mut chan = remove_channel!(self, chan_entry);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               return Ok(());
-                       } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                               if !chan_entry.get().received_shutdown() {
-                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-                                               log_bytes!(msg.channel_id),
-                                               if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
-                               }
+                       if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                               let phase = chan_phase_entry.get_mut();
+                               match phase {
+                                       ChannelPhase::Funded(chan) => {
+                                               if !chan.received_shutdown() {
+                                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                               msg.channel_id,
+                                                               if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
+                                               }
 
-                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                               let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
-                                       chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
-                               dropped_htlcs = htlcs;
-
-                               if let Some(msg) = shutdown {
-                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                       // here as we don't need the monitor update to complete until we send a
-                                       // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: *counterparty_node_id,
-                                               msg,
-                                       });
-                               }
+                                               let funding_txo_opt = chan.context.get_funding_txo();
+                                               let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
+                                                       chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
+                                               dropped_htlcs = htlcs;
 
-                               // Update the monitor with the shutdown script if necessary.
-                               if let Some(monitor_update) = monitor_update_opt {
-                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+                                               if let Some(msg) = shutdown {
+                                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                                       // here as we don't need the monitor update to complete until we send a
+                                                       // `closing_signed`, which we'll delay if we're pending a monitor update.
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: *counterparty_node_id,
+                                                               msg,
+                                                       });
+                                               }
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update_opt {
+                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+                                               }
+                                               break Ok(());
+                                       },
+                                       ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+                                               let context = phase.context_mut();
+                                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+                                               self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+                                               let mut chan = remove_channel_phase!(self, chan_phase_entry);
+                                               self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
+                                               return Ok(());
+                                       },
                                }
-                               break Ok(());
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
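
For the unfunded arms in the shutdown handler above, both V1 variants are closed through one code path via a phase-level context accessor (`phase.context_mut()`). A rough sketch of that accessor pattern; the struct fields here are simplified stand-ins, not the real `ChannelContext`:

    struct ChannelContext { shutdown_requested: bool }

    struct Channel { context: ChannelContext }
    struct OutboundV1Channel { context: ChannelContext }
    struct InboundV1Channel { context: ChannelContext }

    enum ChannelPhase {
        Funded(Channel),
        UnfundedOutboundV1(OutboundV1Channel),
        UnfundedInboundV1(InboundV1Channel),
    }

    impl ChannelPhase {
        fn context_mut(&mut self) -> &mut ChannelContext {
            match self {
                ChannelPhase::Funded(chan) => &mut chan.context,
                ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
                ChannelPhase::UnfundedInboundV1(chan) => &mut chan.context,
            }
        }
    }

    fn main() {
        let mut phase = ChannelPhase::UnfundedInboundV1(InboundV1Channel {
            context: ChannelContext { shutdown_requested: false },
        });
        // The same call works whichever unfunded variant the peer asked to shut down.
        phase.context_mut().shutdown_requested = true;
    }
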
@@ -5838,22 +6009,27 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
-                                       if let Some(msg) = closing_signed {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+                                               if let Some(msg) = closing_signed {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg,
+                                                       });
+                                               }
+                                               if tx.is_some() {
+                                                       // We're done with this channel, we've got a signed closing transaction and
+                                                       // will send the closing_signed back to the remote peer upon return. This
+                                                       // also implies there are no pending HTLCs left on the channel, so we can
+                                                       // fully delete it from tracking (the channel monitor is still around to
+                                                       // watch for old state broadcasts)!
+                                                       (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
+                                               } else { (tx, None) }
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
                                        }
-                                       if tx.is_some() {
-                                               // We're done with this channel, we've got a signed closing transaction and
-                                               // will send the closing_signed back to the remote peer upon return. This
-                                               // also implies there are no pending HTLCs left on the channel, so we can
-                                               // fully delete it from tracking (the channel monitor is still around to
-                                               // watch for old state broadcasts)!
-                                               (tx, Some(remove_channel!(self, chan_entry)))
-                                       } else { (tx, None) }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -5862,7 +6038,7 @@ where
                        log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
                        self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
-               if let Some(chan) = chan_option {
+               if let Some(ChannelPhase::Funded(chan)) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
@@ -5885,6 +6061,9 @@ where
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
+
                let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -5895,37 +6074,41 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-
-                               let pending_forward_info = match decoded_hop_res {
-                                       Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
-                                               self.construct_pending_htlc_status(msg, shared_secret, next_hop,
-                                                       chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt),
-                                       Err(e) => PendingHTLCStatus::Fail(e)
-                               };
-                               let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
-                                       // If the update_add is completely bogus, the call will Err and we will close,
-                                       // but if we've sent a shutdown and they haven't acknowledged it yet, we just
-                                       // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                       match pending_forward_info {
-                                               PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if (error_code & 0x1000) != 0 {
-                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-                                                               HTLCFailReason::reason(real_code, error_data)
-                                                       } else {
-                                                               HTLCFailReason::from_failure_code(error_code)
-                                                       }.get_encrypted_failure_packet(incoming_shared_secret, &None);
-                                                       let msg = msgs::UpdateFailHTLC {
-                                                               channel_id: msg.channel_id,
-                                                               htlc_id: msg.htlc_id,
-                                                               reason
-                                                       };
-                                                       PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-                                               },
-                                               _ => pending_forward_info
-                                       }
-                               };
-                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let pending_forward_info = match decoded_hop_res {
+                                               Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
+                                                       self.construct_pending_htlc_status(msg, shared_secret, next_hop,
+                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+                                               Err(e) => PendingHTLCStatus::Fail(e)
+                                       };
+                                       let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                               // If the update_add is completely bogus, the call will Err and we will close,
+                                               // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                                               // want to reject the new HTLC and fail it backwards instead of forwarding.
+                                               match pending_forward_info {
+                                                       PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+                                                               let reason = if (error_code & 0x1000) != 0 {
+                                                                       let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                                                       HTLCFailReason::reason(real_code, error_data)
+                                                               } else {
+                                                                       HTLCFailReason::from_failure_code(error_code)
+                                                               }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+                                                               let msg = msgs::UpdateFailHTLC {
+                                                                       channel_id: msg.channel_id,
+                                                                       htlc_id: msg.htlc_id,
+                                                                       reason
+                                                               };
+                                                               PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+                                                       },
+                                                       _ => pending_forward_info
+                                               }
+                                       };
+                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
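
In the failure-conversion closure above, the `0x1000` bit is BOLT 4's UPDATE flag: failure codes carrying it must have a `channel_update` appended, which is why the handler rebuilds the error data before failing the HTLC backwards. A tiny illustration of the bit check, with the constant written out here rather than taken from the LDK source:

    const UPDATE: u16 = 0x1000;

    /// Returns true if the failure code requires an accompanying channel_update
    /// in the failure data (e.g. temporary_channel_failure = UPDATE | 7).
    fn needs_channel_update(error_code: u16) -> bool {
        (error_code & UPDATE) != 0
    }

    fn main() {
        assert!(needs_channel_update(UPDATE | 7));   // temporary_channel_failure
        assert!(!needs_channel_update(0x4000 | 15)); // incorrect_or_unknown_payment_details
    }
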
@@ -5944,19 +6127,37 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
-                                       funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded");
-                                       res
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
+                                               if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+                                                       peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
+                                                               .or_insert_with(Vec::new)
+                                                               .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
+                                               }
+                                               // Note that we do not need to push an `actions_blocking_raa_monitor_updates`
+                                               // entry here, even though we *do* need to block the next RAA monitor update.
+                                               // We do this instead in the `claim_funds_internal` by attaching a
+                                               // `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
+                                               // outbound HTLC is claimed. This is guaranteed to all complete before we
+                                               // process the RAA as messages are processed from single peers serially.
+                                               funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+                                               res
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
+                                       }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
+               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
                Ok(())
        }
 
        fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -5966,8 +6167,13 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -5975,6 +6181,8 @@ where
        }
 
        fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -5984,12 +6192,17 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if (msg.failure_code & 0x8000) == 0 {
                                        let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
-                                       try_chan_entry!(self, Err(chan_err), chan);
+                                       try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
+                               }
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
                                }
-                               try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
                                Ok(())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
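
The `0x8000` check above is BOLT 4's BADONION flag; `update_fail_malformed_htlc` only makes sense for onion-parsing failures, so a failure code without that bit closes the channel. A short sketch of the check, again with constants spelled out for illustration:

    const BADONION: u16 = 0x8000;
    const PERM: u16 = 0x4000;

    fn malformed_code_is_valid(failure_code: u16) -> bool {
        (failure_code & BADONION) != 0
    }

    fn main() {
        assert!(malformed_code_is_valid(BADONION | PERM | 5)); // invalid_onion_hmac
        assert!(!malformed_code_is_valid(PERM | 15));          // not a malformed-onion code
    }
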
@@ -6006,13 +6219,18 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               let funding_txo = chan.get().context.get_funding_txo();
-                               let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
-                               if let Some(monitor_update) = monitor_update_opt {
-                                       handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-                                               peer_state, per_peer_state, chan).map(|_| ())
-                               } else { Ok(()) }
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let funding_txo = chan.context.get_funding_txo();
+                                       let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
+                                       if let Some(monitor_update) = monitor_update_opt {
+                                               handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+                                                       peer_state, per_peer_state, chan_phase_entry).map(|_| ())
+                                       } else { Ok(()) }
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -6126,7 +6344,7 @@ where
        /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
        /// the [`ChannelMonitorUpdate`] in question.
        fn raa_monitor_updates_held(&self,
-               actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+               actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
                channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
        ) -> bool {
                actions_blocking_raa_monitor_updates
@@ -6139,6 +6357,23 @@ where
                })
        }
 
+       #[cfg(any(test, feature = "_test_utils"))]
+       pub(crate) fn test_raa_monitor_updates_held(&self,
+               counterparty_node_id: PublicKey, channel_id: ChannelId
+       ) -> bool {
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+                       let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+                       let peer_state = &mut *peer_state_lck;
+
+                       if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
+                               return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+                                       chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+                       }
+               }
+               false
+       }
+
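
The new test helper above also reflects the switch of the RAA-blocking map key from a raw `[u8; 32]` to the `ChannelId` type. Any such newtype works as a `BTreeMap` key provided it derives the ordering traits; a toy sketch, where the `ChannelId` definition and the string placeholder stand in for the real types:

    use std::collections::BTreeMap;

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    struct ChannelId([u8; 32]);

    fn main() {
        let mut blocking_actions: BTreeMap<ChannelId, Vec<&'static str>> = BTreeMap::new();
        blocking_actions
            .entry(ChannelId([42u8; 32]))
            .or_insert_with(Vec::new)
            .push("blocking action placeholder");
        assert_eq!(blocking_actions[&ChannelId([42u8; 32])].len(), 1);
    }
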
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
                let (htlcs_to_fail, res) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -6149,22 +6384,27 @@ where
                                }).map(|mtx| mtx.lock().unwrap())?;
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       let funding_txo_opt = chan.get().context.get_funding_txo();
-                                       let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
-                                               self.raa_monitor_updates_held(
-                                                       &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
-                                                       *counterparty_node_id)
-                                       } else { false };
-                                       let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
-                                               chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan);
-                                       let res = if let Some(monitor_update) = monitor_update_opt {
-                                               let funding_txo = funding_txo_opt
-                                                       .expect("Funding outpoint must have been set for RAA handling to succeed");
-                                               handle_new_monitor_update!(self, funding_txo, monitor_update,
-                                                       peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
-                                       } else { Ok(()) };
-                                       (htlcs_to_fail, res)
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let funding_txo_opt = chan.context.get_funding_txo();
+                                               let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
+                                                       self.raa_monitor_updates_held(
+                                                               &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+                                                               *counterparty_node_id)
+                                               } else { false };
+                                               let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
+                                                       chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
+                                               let res = if let Some(monitor_update) = monitor_update_opt {
+                                                       let funding_txo = funding_txo_opt
+                                                               .expect("Funding outpoint must have been set for RAA handling to succeed");
+                                                       handle_new_monitor_update!(self, funding_txo, monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
+                                               } else { Ok(()) };
+                                               (htlcs_to_fail, res)
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
+                                       }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -6183,8 +6423,13 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -6201,68 +6446,78 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if !chan.get().context.is_usable() {
-                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
-                               }
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       if !chan.context.is_usable() {
+                                               return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+                                       }
 
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                       msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
-                                               &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
-                                               msg, &self.default_configuration
-                                       ), chan),
-                                       // Note that announcement_signatures fails if the channel cannot be announced,
-                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                       update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
-                               });
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                               msg: try_chan_phase_entry!(self, chan.announcement_signatures(
+                                                       &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+                                                       msg, &self.default_configuration
+                                               ), chan_phase_entry),
+                                               // Note that announcement_signatures fails if the channel cannot be announced,
+                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                               update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
+                                       });
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                Ok(())
        }
 
-       /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
+       /// Returns DoPersist if anything changed, otherwise either SkipPersistNoEvents or an Err.
        fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
                        Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        None => {
                                // It's not a local channel
-                               return Ok(NotifyOption::SkipPersist)
+                               return Ok(NotifyOption::SkipPersistNoEvents)
                        }
                };
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
                if peer_state_mutex_opt.is_none() {
-                       return Ok(NotifyOption::SkipPersist)
+                       return Ok(NotifyOption::SkipPersistNoEvents)
                }
                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(chan_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
-                                       if chan.get().context.should_announce() {
-                                               // If the announcement is about a channel of ours which is public, some
-                                               // other peer may simply be forwarding all its gossip to us. Don't provide
-                                               // a scary-looking error message and return Ok instead.
-                                               return Ok(NotifyOption::SkipPersist);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       if chan.context.get_counterparty_node_id() != *counterparty_node_id {
+                                               if chan.context.should_announce() {
+                                                       // If the announcement is about a channel of ours which is public, some
+                                                       // other peer may simply be forwarding all its gossip to us. Don't provide
+                                                       // a scary-looking error message and return Ok instead.
+                                                       return Ok(NotifyOption::SkipPersistNoEvents);
+                                               }
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+                                       }
+                                       let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
+                                       let msg_from_node_one = msg.contents.flags & 1 == 0;
+                                       if were_node_one == msg_from_node_one {
+                                               return Ok(NotifyOption::SkipPersistNoEvents);
+                                       } else {
+                                               log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
+                                               try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
                                        }
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
-                               }
-                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
-                               let msg_from_node_one = msg.contents.flags & 1 == 0;
-                               if were_node_one == msg_from_node_one {
-                                       return Ok(NotifyOption::SkipPersist);
                                } else {
-                                       log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
-                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
-                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
+                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
                }
                Ok(NotifyOption::DoPersist)
        }
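
In the channel_update handler above, the direction check follows BOLT 7: the two endpoints are ordered lexicographically by compressed public key, and bit 0 of the message flags says whether the update came from node_1, so updates describing our own side of the channel are ignored. A compact sketch of that check, with serialized keys replaced by plain byte slices:

    fn update_is_for_counterparty(our_key: &[u8], their_key: &[u8], flags: u8) -> bool {
        let were_node_one = our_key < their_key;
        let msg_from_node_one = flags & 1 == 0;
        // Apply the update only when it describes the counterparty's side.
        were_node_one != msg_from_node_one
    }

    fn main() {
        // If we are node_1, only an update with the direction bit set (from node_2) is applied.
        assert!(update_is_for_counterparty(&[1u8; 33], &[2u8; 33], 1));
        assert!(!update_is_for_counterparty(&[1u8; 33], &[2u8; 33], 0));
    }
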
 
-       fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
+       fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
                let htlc_forwards;
                let need_lnd_workaround = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -6275,52 +6530,59 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       // Currently, we expect all holding cell update_adds to be dropped on peer
-                                       // disconnect, so Channel's reestablish will never hand us any holding cell
-                                       // freed HTLCs to fail backwards. If in the future we no longer drop pending
-                                       // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-                                       let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
-                                               msg, &self.logger, &self.node_signer, self.genesis_hash,
-                                               &self.default_configuration, &*self.best_block.read().unwrap()), chan);
-                                       let mut channel_update = None;
-                                       if let Some(msg) = responses.shutdown_msg {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       } else if chan.get().context.is_usable() {
-                                               // If the channel is in a usable state (ie the channel is not being shut
-                                               // down), send a unicast channel_update to our counterparty to make sure
-                                               // they have the latest channel parameters.
-                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                                       channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: chan.get().context.get_counterparty_node_id(),
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               // Currently, we expect all holding cell update_adds to be dropped on peer
+                                               // disconnect, so Channel's reestablish will never hand us any holding cell
+                                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+                                               let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
+                                                       msg, &self.logger, &self.node_signer, self.genesis_hash,
+                                                       &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
+                                               let mut channel_update = None;
+                                               if let Some(msg) = responses.shutdown_msg {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: counterparty_node_id.clone(),
                                                                msg,
                                                        });
+                                               } else if chan.context.is_usable() {
+                                                       // If the channel is in a usable state (ie the channel is not being shut
+                                                       // down), send a unicast channel_update to our counterparty to make sure
+                                                       // they have the latest channel parameters.
+                                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+                                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                                       node_id: chan.context.get_counterparty_node_id(),
+                                                                       msg,
+                                                               });
+                                                       }
                                                }
+                                               let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
+                                               htlc_forwards = self.handle_channel_resumption(
+                                                       &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
+                                                       Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                               if let Some(upd) = channel_update {
+                                                       peer_state.pending_msg_events.push(upd);
+                                               }
+                                               need_lnd_workaround
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
                                        }
-                                       let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take();
-                                       htlc_forwards = self.handle_channel_resumption(
-                                               &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
-                                               Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
-                                       if let Some(upd) = channel_update {
-                                               peer_state.pending_msg_events.push(upd);
-                                       }
-                                       need_lnd_workaround
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
 
+               let mut persist = NotifyOption::SkipPersistHandleEvents;
                if let Some(forwards) = htlc_forwards {
                        self.forward_htlcs(&mut [forwards][..]);
+                       persist = NotifyOption::DoPersist;
                }
 
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
-               Ok(())
+               Ok(persist)
        }
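
internal_channel_reestablish now returns a three-way persistence hint rather than `()`: it defaults to handling events without persisting and upgrades to a full persist only when it actually queued HTLC forwards. A small sketch of how that return value is chosen; the variant names mirror the hunk, everything around them is invented:

    #[derive(Debug, PartialEq)]
    enum NotifyOption {
        DoPersist,
        SkipPersistHandleEvents,
        SkipPersistNoEvents,
    }

    fn reestablish_notify(had_htlc_forwards: bool) -> NotifyOption {
        let mut persist = NotifyOption::SkipPersistHandleEvents;
        if had_htlc_forwards {
            persist = NotifyOption::DoPersist;
        }
        persist
    }

    fn main() {
        assert_eq!(reestablish_notify(false), NotifyOption::SkipPersistHandleEvents);
        assert_eq!(reestablish_notify(true), NotifyOption::DoPersist);
    }
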
 
        /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
@@ -6335,8 +6597,8 @@ where
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
                                                if let Some(preimage) = htlc_update.payment_preimage {
-                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", &preimage);
-                                                       self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
+                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+                                                       self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -6361,26 +6623,27 @@ where
                                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                                let peer_state = &mut *peer_state_lock;
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                                                               if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
-                                                                       let mut chan = remove_channel!(self, chan_entry);
-                                                                       failed_channels.push(chan.context.force_shutdown(false));
-                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: update
+                                                               if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+                                                                       if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
+                                                                               failed_channels.push(chan.context.force_shutdown(false));
+                                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               msg: update
+                                                                                       });
+                                                                               }
+                                                                               let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+                                                                                       ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+                                                                               } else {
+                                                                                       ClosureReason::CommitmentTxConfirmed
+                                                                               };
+                                                                               self.issue_channel_close_events(&chan.context, reason);
+                                                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                                       node_id: chan.context.get_counterparty_node_id(),
+                                                                                       action: msgs::ErrorAction::SendErrorMessage {
+                                                                                               msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                                       },
                                                                                });
                                                                        }
-                                                                       let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-                                                                               ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-                                                                       } else {
-                                                                               ClosureReason::CommitmentTxConfirmed
-                                                                       };
-                                                                       self.issue_channel_close_events(&chan.context, reason);
-                                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                                               node_id: chan.context.get_counterparty_node_id(),
-                                                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                                                       msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
-                                                                               },
-                                                                       });
                                                                }
                                                        }
                                                }
@@ -6426,7 +6689,9 @@ where
                                'chan_loop: loop {
                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                        let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-                                       for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                       for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
+                                               |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
+                                       ) {
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
@@ -6437,7 +6702,7 @@ where
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
 
-                                                       let channel_id: [u8; 32] = *channel_id;
+                                                       let channel_id: ChannelId = *channel_id;
                                                        let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
                                                                peer_state.channel_by_id.remove(&channel_id));
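The hunks above switch `channel_by_id` to hold `ChannelPhase` values, iterate only funded channels via `filter_map`, and replace raw `[u8; 32]` channel ids with the `ChannelId` newtype. A minimal, self-contained sketch of that filtering pattern follows; the enum and map below are illustrative stand-ins (keys kept as `[u8; 32]` for simplicity), not the actual LDK types:

    use std::collections::HashMap;

    // Stand-in for the ChannelPhase wrapper; the real enum carries full channel state.
    #[allow(dead_code)]
    enum Phase {
        UnfundedInbound(String),
        UnfundedOutbound(String),
        Funded(String),
    }

    // Yield only the funded entries, mirroring the filter_map calls in the loops above.
    fn funded_only(map: &HashMap<[u8; 32], Phase>) -> Vec<(&[u8; 32], &String)> {
        map.iter()
            .filter_map(|(id, phase)| if let Phase::Funded(chan) = phase { Some((id, chan)) } else { None })
            .collect()
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert([0u8; 32], Phase::UnfundedInbound(String::from("pending")));
        map.insert([1u8; 32], Phase::Funded(String::from("ready")));
        assert_eq!(funded_only(&map).len(), 1);
    }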
@@ -6478,38 +6743,43 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|channel_id, chan| {
-                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-                                               Ok((msg_opt, tx_opt)) => {
-                                                       if let Some(msg) = msg_opt {
-                                                               has_update = true;
-                                                               pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                                       node_id: chan.context.get_counterparty_node_id(), msg,
-                                                               });
-                                                       }
-                                                       if let Some(tx) = tx_opt {
-                                                               // We're done with this channel. We got a closing_signed and sent back
-                                                               // a closing_signed with a closing transaction to broadcast.
-                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: update
-                                                                       });
-                                                               }
+                               peer_state.channel_by_id.retain(|channel_id, phase| {
+                                       match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                                               Ok((msg_opt, tx_opt)) => {
+                                                                       if let Some(msg) = msg_opt {
+                                                                               has_update = true;
+                                                                               pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                                                       node_id: chan.context.get_counterparty_node_id(), msg,
+                                                                               });
+                                                                       }
+                                                                       if let Some(tx) = tx_opt {
+                                                                               // We're done with this channel. We got a closing_signed and sent back
+                                                                               // a closing_signed with a closing transaction to broadcast.
+                                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               msg: update
+                                                                                       });
+                                                                               }
 
-                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
+                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
 
-                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-                                                               self.tx_broadcaster.broadcast_transactions(&[&tx]);
-                                                               update_maps_on_chan_removal!(self, &chan.context);
-                                                               false
-                                                       } else { true }
+                                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                                               self.tx_broadcaster.broadcast_transactions(&[&tx]);
+                                                                               update_maps_on_chan_removal!(self, &chan.context);
+                                                                               false
+                                                                       } else { true }
+                                                               },
+                                                               Err(e) => {
+                                                                       has_update = true;
+                                                                       let (close_channel, res) = convert_chan_phase_err!(self, e, chan, channel_id, FUNDED_CHANNEL);
+                                                                       handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
+                                                                       !close_channel
+                                                               }
+                                                       }
                                                },
-                                               Err(e) => {
-                                                       has_update = true;
-                                                       let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                                       handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
-                                                       !close_channel
-                                               }
+                                               _ => true, // Retain unfunded channels if present.
                                        }
                                });
                        }
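The closing_signed pass above uses `HashMap::retain`, returning `false` only for funded channels whose cooperative close just completed (or that hit an error that closes the channel) and `true` for everything else, including unfunded phases. A reduced sketch of that retain shape, with stand-in types and the error path omitted:

    use std::collections::HashMap;

    enum Phase { Unfunded, Funded { done_closing: bool } }

    // Drop funded channels whose cooperative close completed; keep everything else,
    // including unfunded phases (the `_ => true` arm above).
    fn prune_closed(channels: &mut HashMap<u32, Phase>) {
        channels.retain(|_id, phase| match phase {
            Phase::Funded { done_closing } => !*done_closing,
            _ => true,
        });
    }

    fn main() {
        let mut channels = HashMap::new();
        channels.insert(1, Phase::Funded { done_closing: true });
        channels.insert(2, Phase::Funded { done_closing: false });
        channels.insert(3, Phase::Unfunded);
        prune_closed(&mut channels);
        assert_eq!(channels.len(), 2);
    }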
@@ -6702,7 +6972,9 @@ where
                for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for chan in peer_state.channel_by_id.values() {
+                       for chan in peer_state.channel_by_id.values().filter_map(
+                               |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+                       ) {
                                for (htlc_source, _) in chan.inflight_htlc_sources() {
                                        if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
                                                inflight_htlcs.process_path(path, self.get_our_node_id());
@@ -6771,28 +7043,30 @@ where
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
                                        log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
-                                               log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                                               &channel_funding_outpoint.to_channel_id());
                                        break;
                                }
 
-                               if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
-                                       debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
-                                       if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
-                                               log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
-                                                       log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
-                                               if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-                                                       peer_state_lck, peer_state, per_peer_state, chan)
-                                               {
-                                                       errors.push((e, counterparty_node_id));
-                                               }
-                                               if further_update_exists {
-                                                       // If there are more `ChannelMonitorUpdate`s to process, restart at the
-                                                       // top of the loop.
-                                                       continue;
+                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
+                                               if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
+                                                       log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+                                                               channel_funding_outpoint.to_channel_id());
+                                                       if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+                                                               peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
+                                                       {
+                                                               errors.push((e, counterparty_node_id));
+                                                       }
+                                                       if further_update_exists {
+                                                               // If there are more `ChannelMonitorUpdate`s to process, restart at the
+                                                               // top of the loop.
+                                                               continue;
+                                                       }
+                                               } else {
+                                                       log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+                                                               channel_funding_outpoint.to_channel_id());
                                                }
-                                       } else {
-                                               log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
-                                                       log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
                                        }
                                }
                        } else {
@@ -6858,8 +7132,8 @@ where
        /// the `MessageSendEvent`s to the specific peer they were generated under.
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut result = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       let mut result = NotifyOption::SkipPersistNoEvents;
 
                        // TODO: This behavior should be documented. It's unintuitive that we query
                        // ChannelMonitors when clearing other events.
@@ -6940,8 +7214,9 @@ where
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                let new_height = height - 1;
                {
                        let mut best_block = self.best_block.write().unwrap();
@@ -6975,8 +7250,9 @@ where
                let block_hash = header.block_hash();
                log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
 
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
 
@@ -6995,8 +7271,9 @@ where
                let block_hash = header.block_hash();
                log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
 
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
                self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
@@ -7029,7 +7306,7 @@ where
                for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for chan in peer_state.channel_by_id.values() {
+                       for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
                                if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
                                        res.push((funding_txo.txid, Some(block_hash)));
                                }
@@ -7039,8 +7316,9 @@ where
        }
 
        fn transaction_unconfirmed(&self, txid: &Txid) {
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
@@ -7079,88 +7357,94 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|_, channel| {
-                                       let res = f(channel);
-                                       if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
-                                               for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                                       let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-                                                       timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
-                                               }
-                                               if let Some(channel_ready) = channel_ready_opt {
-                                                       send_channel_ready!(self, pending_msg_events, channel, channel_ready);
-                                                       if channel.context.is_usable() {
-                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
-                                                               if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                                               node_id: channel.context.get_counterparty_node_id(),
-                                                                               msg,
-                                                                       });
+                               peer_state.channel_by_id.retain(|_, phase| {
+                                       match phase {
+                                               // Retain unfunded channels.
+                                               ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
+                                               ChannelPhase::Funded(channel) => {
+                                                       let res = f(channel);
+                                                       if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+                                                               for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
+                                                                       let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+                                                                       timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+                                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
+                                                               }
+                                                               if let Some(channel_ready) = channel_ready_opt {
+                                                                       send_channel_ready!(self, pending_msg_events, channel, channel_ready);
+                                                                       if channel.context.is_usable() {
+                                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
+                                                                               if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                                                               node_id: channel.context.get_counterparty_node_id(),
+                                                                                               msg,
+                                                                                       });
+                                                                               }
+                                                                       } else {
+                                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
+                                                                       }
                                                                }
-                                                       } else {
-                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
-                                                       }
-                                               }
 
-                                               {
-                                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                                       emit_channel_ready_event!(pending_events, channel);
-                                               }
+                                                               {
+                                                                       let mut pending_events = self.pending_events.lock().unwrap();
+                                                                       emit_channel_ready_event!(pending_events, channel);
+                                                               }
 
-                                               if let Some(announcement_sigs) = announcement_sigs {
-                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
-                                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                               node_id: channel.context.get_counterparty_node_id(),
-                                                               msg: announcement_sigs,
-                                                       });
-                                                       if let Some(height) = height_opt {
-                                                               if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                                                               msg: announcement,
-                                                                               // Note that announcement_signatures fails if the channel cannot be announced,
-                                                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                                                               update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+                                                               if let Some(announcement_sigs) = announcement_sigs {
+                                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
+                                                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                                                                               node_id: channel.context.get_counterparty_node_id(),
+                                                                               msg: announcement_sigs,
                                                                        });
+                                                                       if let Some(height) = height_opt {
+                                                                               if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                                                                               msg: announcement,
+                                                                                               // Note that announcement_signatures fails if the channel cannot be announced,
+                                                                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                                                                               update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+                                                                                       });
+                                                                               }
+                                                                       }
                                                                }
+                                                               if channel.is_our_channel_ready() {
+                                                                       if let Some(real_scid) = channel.context.get_short_channel_id() {
+                                                                               // If we sent a 0conf channel_ready, and now have an SCID, we add it
+                                                                               // to the short_to_chan_info map here. Note that we check whether we
+                                                                               // can relay using the real SCID at relay-time (i.e.
+                                                                               // enforce option_scid_alias then), and if the funding tx is ever
+                                                                               // un-confirmed we force-close the channel, ensuring short_to_chan_info
+                                                                               // is always consistent.
+                                                                               let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
+                                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+                                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
+                                                                                       "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+                                                                                       fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+                                                                       }
+                                                               }
+                                                       } else if let Err(reason) = res {
+                                                               update_maps_on_chan_removal!(self, &channel.context);
+                                                               // It looks like our counterparty went on-chain or funding transaction was
+                                                               // reorged out of the main chain. Close the channel.
+                                                               failed_channels.push(channel.context.force_shutdown(true));
+                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                               msg: update
+                                                                       });
+                                                               }
+                                                               let reason_message = format!("{}", reason);
+                                                               self.issue_channel_close_events(&channel.context, reason);
+                                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                       node_id: channel.context.get_counterparty_node_id(),
+                                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+                                                                               channel_id: channel.context.channel_id(),
+                                                                               data: reason_message,
+                                                                       } },
+                                                               });
+                                                               return false;
                                                        }
+                                                       true
                                                }
-                                               if channel.is_our_channel_ready() {
-                                                       if let Some(real_scid) = channel.context.get_short_channel_id() {
-                                                               // If we sent a 0conf channel_ready, and now have an SCID, we add it
-                                                               // to the short_to_chan_info map here. Note that we check whether we
-                                                               // can relay using the real SCID at relay-time (i.e.
-                                                               // enforce option_scid_alias then), and if the funding tx is ever
-                                                               // un-confirmed we force-close the channel, ensuring short_to_chan_info
-                                                               // is always consistent.
-                                                               let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
-                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
-                                                                       "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
-                                                                       fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
-                                                       }
-                                               }
-                                       } else if let Err(reason) = res {
-                                               update_maps_on_chan_removal!(self, &channel.context);
-                                               // It looks like our counterparty went on-chain or funding transaction was
-                                               // reorged out of the main chain. Close the channel.
-                                               failed_channels.push(channel.context.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
-                                               let reason_message = format!("{}", reason);
-                                               self.issue_channel_close_events(&channel.context, reason);
-                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: channel.context.get_counterparty_node_id(),
-                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                               channel_id: channel.context.channel_id(),
-                                                               data: reason_message,
-                                                       } },
-                                               });
-                                               return false;
                                        }
-                                       true
                                });
                        }
                }
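When a 0conf channel later learns its real SCID, the block above inserts it into `short_to_chan_info` and asserts that any pre-existing entry already maps to the same (counterparty, channel id) pair. The check relies on `HashMap::insert` returning the previously stored value. A tiny stand-alone illustration, with placeholder types in place of the real node id and channel id:

    use std::collections::HashMap;

    fn main() {
        // Stand-ins: in the code above the value is (counterparty_node_id, channel_id).
        let mut short_to_chan_info: HashMap<u64, (u8, u8)> = HashMap::new();
        let real_scid = 42u64;
        let entry = (7u8, 9u8);

        // HashMap::insert returns the previously stored value, so the assert only
        // fires if the same SCID was already mapped to a *different* channel.
        let scid_insert = short_to_chan_info.insert(real_scid, entry);
        assert!(scid_insert.is_none() || scid_insert.unwrap() == entry, "SCIDs should never collide");

        // Re-inserting the identical mapping is harmless and keeps the map consistent.
        let scid_insert = short_to_chan_info.insert(real_scid, entry);
        assert!(scid_insert.is_none() || scid_insert.unwrap() == entry, "SCIDs should never collide");
    }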
@@ -7217,18 +7501,26 @@ where
                }
        }
 
-       /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
+       /// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
+       /// may have events that need processing.
+       ///
+       /// In order to check if this [`ChannelManager`] needs persisting, call
+       /// [`Self::get_and_clear_needs_persistence`].
        ///
        /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
        /// [`ChannelManager`] and should instead register actions to be taken later.
-       ///
-       pub fn get_persistable_update_future(&self) -> Future {
-               self.persistence_notifier.get_future()
+       pub fn get_event_or_persistence_needed_future(&self) -> Future {
+               self.event_persist_notifier.get_future()
+       }
+
+       /// Returns true if this [`ChannelManager`] needs to be persisted.
+       pub fn get_and_clear_needs_persistence(&self) -> bool {
+               self.needs_persist_flag.swap(false, Ordering::AcqRel)
        }
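`get_and_clear_needs_persistence` reads and clears the flag in a single step via `AtomicBool::swap`, so a caller can await `get_event_or_persistence_needed_future` and then persist only when the flag was actually set, without double-persisting on a single wake-up. A reduced sketch of that clear-on-read behaviour; the struct below is a local stand-in, not the ChannelManager field layout:

    use std::sync::atomic::{AtomicBool, Ordering};

    struct Manager { needs_persist_flag: AtomicBool }

    impl Manager {
        // Mirrors get_and_clear_needs_persistence above: report the old value and reset it,
        // so a single wake-up triggers at most one persist.
        fn get_and_clear_needs_persistence(&self) -> bool {
            self.needs_persist_flag.swap(false, Ordering::AcqRel)
        }
    }

    fn main() {
        let manager = Manager { needs_persist_flag: AtomicBool::new(true) };
        assert!(manager.get_and_clear_needs_persistence());  // observes the pending flag...
        assert!(!manager.get_and_clear_needs_persistence()); // ...and has already cleared it.
    }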
 
        #[cfg(any(test, feature = "_test_utils"))]
-       pub fn get_persistence_condvar_value(&self) -> bool {
-               self.persistence_notifier.notify_pending()
+       pub fn get_event_or_persist_condvar_value(&self) -> bool {
+               self.event_persist_notifier.notify_pending()
        }
 
        /// Gets the latest best block which was connected either via the [`chain::Listen`] or
@@ -7285,8 +7577,21 @@ where
        L::Target: Logger,
 {
        fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // open_channel message - pre-funded channels are never written so there should be no
+               // change to the contents.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_open_channel(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => {
+                                       debug_assert!(false, "We shouldn't close a new channel");
+                                       NotifyOption::DoPersist
+                               },
+                               _ => NotifyOption::SkipPersistHandleEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
@@ -7296,8 +7601,13 @@ where
        }
 
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // accept_channel message - pre-funded channels are never written so there should be no
+               // change to the contents.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+                       NotifyOption::SkipPersistHandleEvents
+               });
        }
 
        fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
@@ -7317,8 +7627,19 @@ where
        }
 
        fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // channel_ready message - while the channel's state will change, any channel_ready message
+               // will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated, so we
+               // will not force-close the channel on startup.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_channel_ready(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               _ => NotifyOption::SkipPersistHandleEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
@@ -7332,8 +7653,19 @@ where
        }
 
        fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_add_htlc message - the message itself doesn't change our channel state; only the
+               // `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_add_htlc(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
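Each of these handlers now maps its internal result onto a `NotifyOption` before the guard drops: persist if the error closes a channel, wake the event loop for other errors, and skip both persistence and event processing on success. A compact, self-contained sketch of that decision; the enum and error type here are stand-ins for the real definitions:

    #[derive(Debug, PartialEq)]
    enum NotifyOption { DoPersist, SkipPersistHandleEvents, SkipPersistNoEvents }

    struct HandlerError { closes_channel: bool }
    impl HandlerError { fn closes_channel(&self) -> bool { self.closes_channel } }

    // Mirrors the match used in handle_update_add_htlc and the other HTLC/fee handlers above.
    fn persist_decision(res: &Result<(), HandlerError>) -> NotifyOption {
        match res {
            Err(e) if e.closes_channel() => NotifyOption::DoPersist,
            Err(_) => NotifyOption::SkipPersistHandleEvents,
            Ok(()) => NotifyOption::SkipPersistNoEvents,
        }
    }

    fn main() {
        assert_eq!(persist_decision(&Ok(())), NotifyOption::SkipPersistNoEvents);
        assert_eq!(persist_decision(&Err(HandlerError { closes_channel: true })), NotifyOption::DoPersist);
        assert_eq!(persist_decision(&Err(HandlerError { closes_channel: false })), NotifyOption::SkipPersistHandleEvents);
    }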
 
        fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
@@ -7342,13 +7674,35 @@ where
        }
 
        fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_fail_htlc message - the message itself doesn't change our channel state; only the
+               // `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_fail_htlc(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_fail_malformed_htlc message - the message itself doesn't change our channel state;
+               // only the `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_fail_malformed_htlc(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
@@ -7362,8 +7716,19 @@ where
        }
 
        fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_fee message - the message itself doesn't change our channel state; only the
+               // `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_fee(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
@@ -7372,23 +7737,32 @@ where
        }
 
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let force_persist = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
                        if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
-                               if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
+                               persist
                        } else {
-                               NotifyOption::SkipPersist
+                               NotifyOption::DoPersist
                        }
                });
        }
 
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_channel_reestablish(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(persist) => *persist,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
+                       self, || NotifyOption::SkipPersistHandleEvents);
+
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
@@ -7398,23 +7772,27 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|_, chan| {
-                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                       if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, &chan.context);
-                                               self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-                                               return false;
-                                       }
-                                       true
-                               });
-                               peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
-                                       update_maps_on_chan_removal!(self, &chan.context);
-                                       self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-                                       false
-                               });
-                               peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
-                                       update_maps_on_chan_removal!(self, &chan.context);
-                                       self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+                               peer_state.channel_by_id.retain(|_, phase| {
+                                       let context = match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                                       // We only retain funded channels that are not shutdown.
+                                                       if !chan.is_shutdown() {
+                                                               return true;
+                                                       }
+                                                       &chan.context
+                                               },
+                                               // Unfunded channels will always be removed.
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       &chan.context
+                                               },
+                                               ChannelPhase::UnfundedInboundV1(chan) => {
+                                                       &chan.context
+                                               },
+                                       };
+                                       // Clean up for removal.
+                                       update_maps_on_chan_removal!(self, &context);
+                                       self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
@@ -7483,74 +7861,82 @@ where
                        return Err(());
                }
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               let mut res = Ok(());
 
-               // If we have too many peers connected which don't have funded channels, disconnect the
-               // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
-               // unfunded channels taking up space in memory for disconnected peers, we still let new
-               // peers connect, but we'll reject new channels from them.
-               let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
-               let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       // If we have too many peers connected which don't have funded channels, disconnect the
+                       // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+                       // unfunded channels taking up space in memory for disconnected peers, we still let new
+                       // peers connect, but we'll reject new channels from them.
+                       let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+                       let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
 
-               {
-                       let mut peer_state_lock = self.per_peer_state.write().unwrap();
-                       match peer_state_lock.entry(counterparty_node_id.clone()) {
-                               hash_map::Entry::Vacant(e) => {
-                                       if inbound_peer_limited {
-                                               return Err(());
-                                       }
-                                       e.insert(Mutex::new(PeerState {
-                                               channel_by_id: HashMap::new(),
-                                               outbound_v1_channel_by_id: HashMap::new(),
-                                               inbound_v1_channel_by_id: HashMap::new(),
-                                               inbound_channel_request_by_id: HashMap::new(),
-                                               latest_features: init_msg.features.clone(),
-                                               pending_msg_events: Vec::new(),
-                                               in_flight_monitor_updates: BTreeMap::new(),
-                                               monitor_update_blocked_actions: BTreeMap::new(),
-                                               actions_blocking_raa_monitor_updates: BTreeMap::new(),
-                                               is_connected: true,
-                                       }));
-                               },
-                               hash_map::Entry::Occupied(e) => {
-                                       let mut peer_state = e.get().lock().unwrap();
-                                       peer_state.latest_features = init_msg.features.clone();
-
-                                       let best_block_height = self.best_block.read().unwrap().height();
-                                       if inbound_peer_limited &&
-                                               Self::unfunded_channel_count(&*peer_state, best_block_height) ==
-                                               peer_state.channel_by_id.len()
-                                       {
-                                               return Err(());
-                                       }
+                       {
+                               let mut peer_state_lock = self.per_peer_state.write().unwrap();
+                               match peer_state_lock.entry(counterparty_node_id.clone()) {
+                                       hash_map::Entry::Vacant(e) => {
+                                               if inbound_peer_limited {
+                                                       res = Err(());
+                                                       return NotifyOption::SkipPersistNoEvents;
+                                               }
+                                               e.insert(Mutex::new(PeerState {
+                                                       channel_by_id: HashMap::new(),
+                                                       inbound_channel_request_by_id: HashMap::new(),
+                                                       latest_features: init_msg.features.clone(),
+                                                       pending_msg_events: Vec::new(),
+                                                       in_flight_monitor_updates: BTreeMap::new(),
+                                                       monitor_update_blocked_actions: BTreeMap::new(),
+                                                       actions_blocking_raa_monitor_updates: BTreeMap::new(),
+                                                       is_connected: true,
+                                               }));
+                                       },
+                                       hash_map::Entry::Occupied(e) => {
+                                               let mut peer_state = e.get().lock().unwrap();
+                                               peer_state.latest_features = init_msg.features.clone();
+
+                                               let best_block_height = self.best_block.read().unwrap().height();
+                                               if inbound_peer_limited &&
+                                                       Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+                                                       peer_state.channel_by_id.len()
+                                               {
+                                                       res = Err(());
+                                                       return NotifyOption::SkipPersistNoEvents;
+                                               }
 
-                                       debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
-                                       peer_state.is_connected = true;
-                               },
+                                               debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+                                               peer_state.is_connected = true;
+                                       },
+                               }
                        }
-               }
 
-               log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+                       log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
-               let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       let pending_msg_events = &mut peer_state.pending_msg_events;
-
-                       // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-                       // (so won't be recovered after a crash) we don't need to bother closing unfunded channels and
-                       // clearing their maps here. Instead we can just send queue channel_reestablish messages for
-                       // channels in the channel_by_id map.
-                       peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
-                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                       node_id: chan.context.get_counterparty_node_id(),
-                                       msg: chan.get_channel_reestablish(&self.logger),
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               let pending_msg_events = &mut peer_state.pending_msg_events;
+
+                               peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
+                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
+                                               // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+                                               // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
+                                               // worry about closing and removing them.
+                                               debug_assert!(false);
+                                               None
+                                       }
+                               ).for_each(|chan| {
+                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                               node_id: chan.context.get_counterparty_node_id(),
+                                               msg: chan.get_channel_reestablish(&self.logger),
+                                       });
                                });
-                       });
-               }
-               //TODO: Also re-broadcast announcement_signatures
-               Ok(())
+                       }
+
+                       //TODO: Also re-broadcast announcement_signatures
+                       return NotifyOption::SkipPersistHandleEvents;
+               });
+               res
        }
 
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
@@ -7567,12 +7953,12 @@ where
                                // very low priority for the LND team despite being marked "P1".
                                // We're not going to bother handling this in a sensible way, instead simply
                                // repeating the Shutdown message on repeat until morale improves.
-                               if msg.channel_id != [0; 32] {
+                               if !msg.channel_id.is_zero() {
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                        let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() { return; }
                                        let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
-                                       if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
+                                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
                                                if let Some(msg) = chan.get_outbound_shutdown() {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                                node_id: *counterparty_node_id,
@@ -7596,8 +7982,8 @@ where
                        _ => {}
                }
 
-               if msg.channel_id == [0; 32] {
-                       let channel_ids: Vec<[u8; 32]> = {
+               if msg.channel_id.is_zero() {
+                       let channel_ids: Vec<ChannelId> = {
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
                                if peer_state_mutex_opt.is_none() { return; }
@@ -7606,9 +7992,7 @@ where
                                // Note that we don't bother generating any events for pre-accept channels -
                                // they're not considered "channels" yet from the PoV of our events interface.
                                peer_state.inbound_channel_request_by_id.clear();
-                               peer_state.channel_by_id.keys().cloned()
-                                       .chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
-                                       .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
+                               peer_state.channel_by_id.keys().cloned().collect()
                        };
                        for channel_id in channel_ids {
                                // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
@@ -7622,7 +8006,7 @@ where
                                if peer_state_mutex_opt.is_none() { return; }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
+                               if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
                                        if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
@@ -8203,31 +8587,30 @@ where
                let mut serializable_peer_count: u64 = 0;
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       let mut unfunded_channels = 0;
-                       let mut number_of_channels = 0;
+                       let mut number_of_funded_channels = 0;
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if !peer_state.ok_to_remove(false) {
                                        serializable_peer_count += 1;
                                }
-                               number_of_channels += peer_state.channel_by_id.len();
-                               for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if !channel.context.is_funding_initiated() {
-                                               unfunded_channels += 1;
-                                       }
-                               }
+
+                               number_of_funded_channels += peer_state.channel_by_id.iter().filter(
+                                       |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false }
+                               ).count();
                        }
 
-                       ((number_of_channels - unfunded_channels) as u64).write(writer)?;
+                       (number_of_funded_channels as u64).write(writer)?;
 
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if channel.context.is_funding_initiated() {
-                                               channel.write(writer)?;
-                                       }
+                               for channel in peer_state.channel_by_id.iter().filter_map(
+                                       |(_, phase)| if let ChannelPhase::Funded(channel) = phase {
+                                               if channel.context.is_funding_initiated() { Some(channel) } else { None }
+                                       } else { None }
+                               ) {
+                                       channel.write(writer)?;
                                }
                        }
                }
@@ -8340,6 +8723,8 @@ where
                                                session_priv.write(writer)?;
                                        }
                                }
+                               PendingOutboundPayment::AwaitingInvoice { .. } => {},
+                               PendingOutboundPayment::InvoiceReceived { .. } => {},
                                PendingOutboundPayment::Fulfilled { .. } => {},
                                PendingOutboundPayment::Abandoned { .. } => {},
                        }
@@ -8611,7 +8996,7 @@ where
 
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
@@ -8630,8 +9015,22 @@ where
                                        // But if the channel is behind the monitor, close the channel:
                                        log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+                                       if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                                       &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+                                       }
+                                       if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
+                                                       &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
+                                       }
+                                       if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
+                                                       &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
+                                       }
+                                       if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
+                                                       &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
+                                       }
                                        let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
@@ -8661,13 +9060,13 @@ where
                                                        // backwards leg of the HTLC will simply be rejected.
                                                        log_info!(args.logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
-                                                               log_bytes!(channel.context.channel_id()), &payment_hash);
+                                                               &channel.context.channel_id(), &payment_hash);
                                                        failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                                }
                                        }
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
-                                               log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
+                                               &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
@@ -8675,14 +9074,14 @@ where
                                        if channel.context.is_funding_initiated() {
                                                id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
                                        }
-                                       match peer_channels.entry(channel.context.get_counterparty_node_id()) {
+                                       match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        let by_id_map = entry.get_mut();
-                                                       by_id_map.insert(channel.context.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        let mut by_id_map = HashMap::new();
-                                                       by_id_map.insert(channel.context.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                        entry.insert(by_id_map);
                                                }
                                        }
@@ -8700,7 +9099,7 @@ where
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
                                }, None));
                        } else {
-                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
+                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
                                log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
@@ -8712,7 +9111,7 @@ where
                for (funding_txo, _) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
                                log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
-                                       log_bytes!(funding_txo.to_channel_id()));
+                                       &funding_txo.to_channel_id());
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
@@ -8749,8 +9148,6 @@ where
                let peer_state_from_chans = |channel_by_id| {
                        PeerState {
                                channel_by_id,
-                               outbound_v1_channel_by_id: HashMap::new(),
-                               inbound_v1_channel_by_id: HashMap::new(),
                                inbound_channel_request_by_id: HashMap::new(),
                                latest_features: InitFeatures::empty(),
                                pending_msg_events: Vec::new(),
@@ -8765,7 +9162,7 @@ where
                let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                for _ in 0..peer_count {
                        let peer_pubkey = Readable::read(reader)?;
-                       let peer_chans = peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+                       let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
                        let mut peer_state = peer_state_from_chans(peer_chans);
                        peer_state.latest_features = Readable::read(reader)?;
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -8896,7 +9293,7 @@ where
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
                                        log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
-                                               update.update_id, $channel_info_log, log_bytes!($funding_txo.to_channel_id()));
+                                               update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
@@ -8926,30 +9323,37 @@ where
                for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mtx.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for (_, chan) in peer_state.channel_by_id.iter() {
-                               // Channels that were persisted have to be funded, otherwise they should have been
-                               // discarded.
-                               let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
-                               let monitor = args.channel_monitors.get(&funding_txo)
-                                       .expect("We already checked for monitor presence when loading channels");
-                               let mut max_in_flight_update_id = monitor.get_latest_update_id();
-                               if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
-                                       if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
-                                               max_in_flight_update_id = cmp::max(max_in_flight_update_id,
-                                                       handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
-                                                               funding_txo, monitor, peer_state, ""));
+                       for phase in peer_state.channel_by_id.values() {
+                               if let ChannelPhase::Funded(chan) = phase {
+                                       // Channels that were persisted have to be funded, otherwise they should have been
+                                       // discarded.
+                                       let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                                       let monitor = args.channel_monitors.get(&funding_txo)
+                                               .expect("We already checked for monitor presence when loading channels");
+                                       let mut max_in_flight_update_id = monitor.get_latest_update_id();
+                                       if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
+                                               if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+                                                       max_in_flight_update_id = cmp::max(max_in_flight_update_id,
+                                                               handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
+                                                                       funding_txo, monitor, peer_state, ""));
+                                               }
                                        }
-                               }
-                               if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
-                                       // If the channel is ahead of the monitor, return InvalidValue:
-                                       log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
-                                               log_bytes!(chan.context.channel_id()), monitor.get_latest_update_id(), max_in_flight_update_id);
-                                       log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
-                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
+                                               // If the channel is ahead of the monitor, return InvalidValue:
+                                               log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+                                                       chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
+                                               log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+                                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                               log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                               return Err(DecodeError::InvalidValue);
+                                       }
+                               } else {
+                                       // We shouldn't have persisted (or read) any unfunded channel types, so none should
+                                       // have ended up in this `channel_by_id` map.
+                                       debug_assert!(false);
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
@@ -8970,7 +9374,7 @@ where
                                } else {
                                        log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
-                                               log_bytes!(funding_txo.to_channel_id()));
+                                               &funding_txo.to_channel_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -9056,7 +9460,7 @@ where
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                                log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                                       &htlc.payment_hash, log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+                                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                                false
                                                                                        } else { true }
                                                                                } else { true }
@@ -9066,7 +9470,7 @@ where
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                       &htlc.payment_hash, log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
                                                                                                intercepted_id != ev_id
@@ -9114,6 +9518,7 @@ where
                                                                        // downstream chan is closed (because we don't have a
                                                                        // channel_id -> peer map entry).
                                                                        counterparty_opt.is_none(),
+                                                                       counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
                                                                        monitor.get_funding_txo().0))
                                                        } else { None }
                                                } else {
@@ -9221,28 +9626,35 @@ where
                for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
-                               if chan.context.outbound_scid_alias() == 0 {
-                                       let mut outbound_scid_alias;
-                                       loop {
-                                               outbound_scid_alias = fake_scid::Namespace::OutboundAlias
-                                                       .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
-                                               if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
-                                       }
-                                       chan.context.set_outbound_scid_alias(outbound_scid_alias);
-                               } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
-                                       // Note that in rare cases its possible to hit this while reading an older
-                                       // channel if we just happened to pick a colliding outbound alias above.
-                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
-                                       return Err(DecodeError::InvalidValue);
-                               }
-                               if chan.context.is_usable() {
-                                       if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
+                       for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
+                               if let ChannelPhase::Funded(chan) = phase {
+                                       if chan.context.outbound_scid_alias() == 0 {
+                                               let mut outbound_scid_alias;
+                                               loop {
+                                                       outbound_scid_alias = fake_scid::Namespace::OutboundAlias
+                                                               .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
+                                                       if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
+                                               }
+                                               chan.context.set_outbound_scid_alias(outbound_scid_alias);
+                                       } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
                                                // Note that in rare cases it's possible to hit this while reading an older
                                                // channel if we just happened to pick a colliding outbound alias above.
                                                log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                return Err(DecodeError::InvalidValue);
                                        }
+                                       if chan.context.is_usable() {
+                                               if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
+                                                       // Note that in rare cases it's possible to hit this while reading an older
+                                                       // channel if we just happened to pick a colliding outbound alias above.
+                                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+                                                       return Err(DecodeError::InvalidValue);
+                                               }
+                                       }
+                               } else {
+                                       // We shouldn't have persisted (or read) any unfunded channel types, so none should
+                                       // have ended up in this `channel_by_id` map.
+                                       debug_assert!(false);
+                                       return Err(DecodeError::InvalidValue);
                                }
                        }
                }
@@ -9284,7 +9696,7 @@ where
                                                        let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
-                                                       if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+                                                       if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
                                                                channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
                                                        }
                                                }
@@ -9368,7 +9780,9 @@ where
                        pending_background_events: Mutex::new(pending_background_events),
                        total_consistency_lock: RwLock::new(()),
                        background_events_processed_since_startup: AtomicBool::new(false),
-                       persistence_notifier: Notifier::new(),
+
+                       event_persist_notifier: Notifier::new(),
+                       needs_persist_flag: AtomicBool::new(false),
 
                        entropy_source: args.entropy_source,
                        node_signer: args.node_signer,
@@ -9385,12 +9799,12 @@ where
                        channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
 
-               for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
+               for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
                        // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
                        channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-                               downstream_closed, downstream_funding);
+                               downstream_closed, downstream_node_id, downstream_funding);
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -9408,6 +9822,7 @@ mod tests {
        use core::sync::atomic::Ordering;
        use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use crate::ln::ChannelId;
        use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{self, ErrorAction};
@@ -9429,9 +9844,9 @@ mod tests {
 
                // All nodes start with a persistable update pending as `create_network` connects each node
                // with all other nodes to make most tests simpler.
-               assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
+               assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
@@ -9445,19 +9860,19 @@ mod tests {
                        &nodes[0].node.get_our_node_id()).pop().unwrap();
 
                // The first two nodes (which opened a channel) should now require fresh persistence
-               assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
                // ... but the last node should not.
-               assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
                // After persisting the first two nodes they should no longer need fresh persistence.
-               assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
                // about the channel.
                nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
                nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
-               assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                // The nodes which are a party to the channel should also ignore messages from unrelated
                // parties.
@@ -9465,8 +9880,8 @@ mod tests {
                nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
                nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
                nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
-               assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                // At this point the channel info given by peers should still be the same.
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
@@ -9483,8 +9898,8 @@ mod tests {
                // persisted and that its channel info remains the same.
                nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
                nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
-               assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
 
@@ -9492,8 +9907,8 @@ mod tests {
                // the channel info has updated.
                nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
                nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
-               assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
                assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
        }
@@ -9644,13 +10059,12 @@ mod tests {
 
                // To start (1), send a regular payment but don't claim it.
                let expected_route = [&nodes[1]];
-               let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+               let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
 
                // Next, attempt a keysend payment and make sure it fails.
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
-                       final_value_msat: 100_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
+                       TEST_FINAL_CLTV, false), 100_000);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
                        None, nodes[0].logger, &scorer, &(), &random_seed_bytes
@@ -9738,10 +10152,10 @@ mod tests {
                pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
 
                // Next, attempt a keysend payment and make sure it fails.
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
-                       final_value_msat: 100_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+                       100_000
+               );
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
                        None, nodes[0].logger, &scorer, &(), &random_seed_bytes
@@ -9787,10 +10201,8 @@ mod tests {
                let payee_pubkey = nodes[1].node.get_our_node_id();
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-                       final_value_msat: 10_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
                let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let scorer = test_utils::TestScorer::new();
@@ -9834,10 +10246,8 @@ mod tests {
                let payee_pubkey = nodes[1].node.get_our_node_id();
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-                       final_value_msat: 10_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
                let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let scorer = test_utils::TestScorer::new();
@@ -9981,7 +10391,7 @@ mod tests {
                nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
                let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
-               let channel_id = &tx.txid().into_inner();
+               let channel_id = ChannelId::from_bytes(tx.txid().into_inner());
                {
                        // Ensure that the `id_to_peer` map is empty until either party has received the
                        // funding transaction, and has the real `channel_id`.
@@ -9995,7 +10405,7 @@ mod tests {
                        // as it has the funding transaction.
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(channel_id));
+                       assert!(nodes_0_lock.contains_key(&channel_id));
                }
 
                assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
@@ -10006,7 +10416,7 @@ mod tests {
                {
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(channel_id));
+                       assert!(nodes_0_lock.contains_key(&channel_id));
                }
                expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
@@ -10015,7 +10425,7 @@ mod tests {
                        // as it has the funding transaction.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(channel_id));
+                       assert!(nodes_1_lock.contains_key(&channel_id));
                }
                check_added_monitors!(nodes[1], 1);
                let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
@@ -10026,7 +10436,7 @@ mod tests {
                let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
                update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
 
-               nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
                nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
                let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
@@ -10040,7 +10450,7 @@ mod tests {
                        // party's signature for the fee negotiated closing transaction.)
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(channel_id));
+                       assert!(nodes_0_lock.contains_key(&channel_id));
                }
 
                {
@@ -10050,7 +10460,7 @@ mod tests {
                        // kept in the `nodes[1]`'s `id_to_peer` map.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(channel_id));
+                       assert!(nodes_1_lock.contains_key(&channel_id));
                }
 
                nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
@@ -10066,7 +10476,7 @@ mod tests {
                        // doesn't have `nodes[0]`'s signature for the closing transaction yet.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(channel_id));
+                       assert!(nodes_1_lock.contains_key(&channel_id));
                }
 
                let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
@@ -10117,7 +10527,7 @@ mod tests {
                let nodes = create_network(2, &node_cfg, &node_chanmgr);
 
                // Dummy values
-               let channel_id = [4; 32];
+               let channel_id = ChannelId::from_bytes([4; 32]);
                let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
                let intercept_id = InterceptId([0; 32]);
 
@@ -10172,11 +10582,11 @@ mod tests {
                                check_added_monitors!(nodes[0], 1);
                                expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
                        }
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
-               open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
                        open_channel_msg.temporary_channel_id);
@@ -10227,7 +10637,7 @@ mod tests {
                for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
                        nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
@@ -10267,7 +10677,7 @@ mod tests {
                for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
                        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
@@ -10319,7 +10729,7 @@ mod tests {
                                _ => panic!("Unexpected event"),
                        }
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // If we try to accept a channel from another peer as non-0conf, it will fail.
@@ -10535,7 +10945,7 @@ mod tests {
 
                // If we provide a channel_id not associated with the peer, we should get an error and no updates
                // should be applied to ensure update atomicity as specified in the API docs.
-               let bad_channel_id = [10; 32];
+               let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
                let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
                let new_fee = current_fee + 100;
                assert!(
@@ -10582,7 +10992,7 @@ pub mod bench {
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
 
-       use crate::sync::{Arc, Mutex};
+       use crate::sync::{Arc, Mutex, RwLock};
 
        use criterion::Criterion;
 
@@ -10619,7 +11029,7 @@ pub mod bench {
                let tx_broadcaster = test_utils::TestBroadcaster::new(network);
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
 
                let mut config: UserConfig = Default::default();
@@ -10734,9 +11144,9 @@ pub mod bench {
                                let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
 
                                $node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
-                                       PaymentId(payment_hash.0), RouteParameters {
-                                               payment_params, final_value_msat: 10_000,
-                                       }, Retry::Attempts(0)).unwrap();
+                                       PaymentId(payment_hash.0),
+                                       RouteParameters::from_payment_params_and_value(payment_params, 10_000),
+                                       Retry::Attempts(0)).unwrap();
                                let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
                                $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
                                $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
index 1992b8db9e565b7696e60bc286581583ca8beed7..bcaa91ab266fe7f10ebbc8de710fc30d927365c3 100644 (file)
@@ -798,6 +798,35 @@ impl<T: sealed::Context> Features<T> {
                true
        }
 
+       /// Sets a required feature bit. Errors if `bit` is outside the feature range as defined
+       /// by [BOLT 9].
+       ///
+       /// Note: Required bits are even. If an odd bit is given, then the corresponding even bit will
+       /// be set instead (i.e., `bit - 1`).
+       ///
+       /// [BOLT 9]: https://github.com/lightning/bolts/blob/master/09-features.md
+       pub fn set_required_feature_bit(&mut self, bit: usize) -> Result<(), ()> {
+               self.set_feature_bit(bit - (bit % 2))
+       }
+
+       /// Sets an optional feature bit. Errors if `bit` is outside the feature range as defined
+       /// by [BOLT 9].
+       ///
+       /// Note: Optional bits are odd. If an even bit is given, then the corresponding odd bit will be
+       /// set instead (i.e., `bit + 1`).
+       ///
+       /// [BOLT 9]: https://github.com/lightning/bolts/blob/master/09-features.md
+       pub fn set_optional_feature_bit(&mut self, bit: usize) -> Result<(), ()> {
+               self.set_feature_bit(bit + (1 - (bit % 2)))
+       }
+
+       fn set_feature_bit(&mut self, bit: usize) -> Result<(), ()> {
+               if bit > 255 {
+                       return Err(());
+               }
+               self.set_bit(bit, false)
+       }
+
        /// Sets a required custom feature bit. Errors if `bit` is outside the custom range as defined
        /// by [bLIP 2] or if it is a known `T` feature.
        ///
@@ -824,10 +853,13 @@ impl<T: sealed::Context> Features<T> {
                if bit < 256 {
                        return Err(());
                }
+               self.set_bit(bit, true)
+       }
 
+       fn set_bit(&mut self, bit: usize, custom: bool) -> Result<(), ()> {
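+               // `byte_offset` selects the byte of the flags vector that contains `bit`, and `mask`
+               // isolates `bit` within that byte (i.e., 1 << (bit % 8)).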
                let byte_offset = bit / 8;
                let mask = 1 << (bit - 8 * byte_offset);
-               if byte_offset < T::KNOWN_FEATURE_MASK.len() {
+               if byte_offset < T::KNOWN_FEATURE_MASK.len() && custom {
                        if (T::KNOWN_FEATURE_MASK[byte_offset] & mask) != 0 {
                                return Err(());
                        }
@@ -1078,6 +1110,13 @@ mod tests {
                assert!(!features.requires_basic_mpp());
                assert!(features.requires_payment_secret());
                assert!(features.supports_payment_secret());
+
+               // Set flags manually
+               let mut features = NodeFeatures::empty();
+               assert!(features.set_optional_feature_bit(55).is_ok());
+               assert!(features.supports_keysend());
+               assert!(features.set_optional_feature_bit(255).is_ok());
+               assert!(features.set_required_feature_bit(256).is_err());
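+
+               // Illustrative sketch (editor's addition, not part of the upstream change): per the
+               // doc comments on the new setters, passing an odd bit to `set_required_feature_bit`
+               // sets the corresponding even bit instead, so bit 55 here actually sets bit 54 and
+               // keysend support is still reported.
+               let mut features = NodeFeatures::empty();
+               assert!(features.set_required_feature_bit(55).is_ok());
+               assert!(features.supports_keysend());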
        }
 
        #[test]
index 7f1ded0323ecd3804ac3cb3c90cf6b5062fbe411..f6684485dba0d69964686b145b9fd3b383c854cc 100644 (file)
@@ -16,14 +16,14 @@ use crate::chain::channelmonitor::ChannelMonitor;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
 use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource};
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
 use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
-use crate::routing::router::{self, PaymentParameters, Route};
+use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
 use crate::ln::features::InitFeatures;
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::scid_utils;
 use crate::util::test_utils;
 use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
@@ -43,10 +43,10 @@ use crate::io;
 use crate::prelude::*;
 use core::cell::RefCell;
 use alloc::rc::Rc;
-use crate::sync::{Arc, Mutex, LockTestExt};
+use crate::sync::{Arc, Mutex, LockTestExt, RwLock};
 use core::mem;
 use core::iter::repeat;
-use bitcoin::{PackedLockTime, TxMerkleNode};
+use bitcoin::{PackedLockTime, TxIn, TxMerkleNode};
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
@@ -73,6 +73,20 @@ pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Tra
        let height = node.best_block_info().1 + 1;
        confirm_transactions_at(node, txn, height);
 }
+/// Mine a single block containing the given transaction without extra consistency checks which may
+/// impact ChannelManager state.
+pub fn mine_transaction_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+       let height = node.best_block_info().1 + 1;
+       let mut block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
+               txdata: Vec::new(),
+       };
+       for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
+               block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+       }
+       block.txdata.push((*tx).clone());
+       do_connect_block_without_consistency_checks(node, block, false);
+}
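+// Hypothetical usage sketch (editor's illustration; `commitment_tx` is a placeholder, not from
+// this change): confirm a transaction while deliberately skipping the usual consistency checks:
+//   mine_transaction_without_consistency_checks(&nodes[1], &commitment_tx);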
 /// Mine the given transaction at the given height, mining blocks as required to build to that
 /// height
 ///
@@ -211,16 +225,16 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
        assert!(depth >= 1);
        for i in 1..depth {
                let prev_blockhash = block.header.block_hash();
-               do_connect_block(node, block, skip_intermediaries);
+               do_connect_block_with_consistency_checks(node, block, skip_intermediaries);
                block = create_dummy_block(prev_blockhash, height + i, Vec::new());
        }
        let hash = block.header.block_hash();
-       do_connect_block(node, block, false);
+       do_connect_block_with_consistency_checks(node, block, false);
        hash
 }
 
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
-       do_connect_block(node, block.clone(), false);
+       do_connect_block_with_consistency_checks(node, block.clone(), false);
 }
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
@@ -230,8 +244,14 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
        }
 }
 
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+fn do_connect_block_with_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
        call_claimable_balances(node);
+       do_connect_block_without_consistency_checks(node, block, skip_intermediaries);
+       call_claimable_balances(node);
+       node.node.test_process_background_events();
+}
+
+fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
        let height = node.best_block_info().1 + 1;
        #[cfg(feature = "std")] {
                eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
@@ -286,8 +306,6 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, sk
                        }
                }
        }
-       call_claimable_balances(node);
-       node.node.test_process_background_events();
 
        for tx in &block.txdata {
                for input in &tx.input {
@@ -352,7 +370,7 @@ pub struct TestChanMonCfg {
        pub persister: test_utils::TestPersister,
        pub logger: test_utils::TestLogger,
        pub keys_manager: test_utils::TestKeysInterface,
-       pub scorer: Mutex<test_utils::TestScorer>,
+       pub scorer: RwLock<test_utils::TestScorer>,
 }
 
 pub struct NodeCfg<'a> {
@@ -520,7 +538,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
-                                       let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+                                       let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                                &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap();
                                        deserialized_monitors.push(deserialized_monitor);
                                }
@@ -539,7 +557,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                        channel_monitors.insert(monitor.get_funding_txo().0, monitor);
                                }
 
-                               let scorer = Mutex::new(test_utils::TestScorer::new());
+                               let scorer = RwLock::new(test_utils::TestScorer::new());
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                self.node.write(&mut w).unwrap();
                                <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(w.0), ChannelManagerReadArgs {
@@ -570,11 +588,11 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
        }
 }
 
-pub fn create_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
 }
 
-pub fn create_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        let (channel_ready, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &channel_ready);
        (announcement, as_update, bs_update, channel_id, tx)
@@ -811,36 +829,14 @@ macro_rules! get_channel_ref {
        }
 }
 
-#[cfg(test)]
-macro_rules! get_outbound_v1_channel_ref {
-       ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
-               {
-                       $per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
-                       $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-                       $peer_state_lock.outbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
-               }
-       }
-}
-
-#[cfg(test)]
-macro_rules! get_inbound_v1_channel_ref {
-       ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
-               {
-                       $per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
-                       $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-                       $peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
-               }
-       }
-}
-
 #[cfg(test)]
 macro_rules! get_feerate {
        ($node: expr, $counterparty_node: expr, $channel_id: expr) => {
                {
                        let mut per_peer_state_lock;
                        let mut peer_state_lock;
-                       let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-                       chan.context.get_feerate_sat_per_1000_weight()
+                       let phase = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
+                       phase.context().get_feerate_sat_per_1000_weight()
                }
        }
 }
@@ -852,7 +848,7 @@ macro_rules! get_channel_type_features {
                        let mut per_peer_state_lock;
                        let mut peer_state_lock;
                        let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-                       chan.context.get_channel_type().clone()
+                       chan.context().get_channel_type().clone()
                }
        }
 }
@@ -868,7 +864,7 @@ macro_rules! get_monitor {
                        for index in 0..2 {
                                if let Ok(mon) = $node.chain_monitor.chain_monitor.get_monitor(
                                        $crate::chain::transaction::OutPoint {
-                                               txid: bitcoin::Txid::from_slice(&$channel_id[..]).unwrap(), index
+                                               txid: bitcoin::Txid::from_slice(&$channel_id.0[..]).unwrap(), index
                                        })
                                {
                                        monitor = Some(mon);
@@ -952,7 +948,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
        let mut monitors_read = Vec::with_capacity(monitors_encoded.len());
        for encoded in monitors_encoded {
                let mut monitor_read = &encoded[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>
                        ::read(&mut monitor_read, (node.keys_manager, node.keys_manager)).unwrap();
                assert!(monitor_read.is_empty());
                monitors_read.push(monitor);
@@ -1005,7 +1001,23 @@ macro_rules! reload_node {
        };
 }
 
-pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> ([u8; 32], Transaction, OutPoint) {
+pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>,
+       expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128)
+ -> (ChannelId, Transaction, OutPoint)
+{
+       internal_create_funding_transaction(node, expected_counterparty_node_id, expected_chan_value, expected_user_chan_id, false)
+}
+
+pub fn create_coinbase_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>,
+       expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128)
+ -> (ChannelId, Transaction, OutPoint)
+{
+       internal_create_funding_transaction(node, expected_counterparty_node_id, expected_chan_value, expected_user_chan_id, true)
+}
+
+fn internal_create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>,
+       expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128,
+       coinbase: bool) -> (ChannelId, Transaction, OutPoint) {
        let chan_id = *node.network_chan_count.borrow();
 
        let events = node.node.get_and_clear_pending_events();
@@ -1016,7 +1028,16 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_
                        assert_eq!(*channel_value_satoshis, expected_chan_value);
                        assert_eq!(user_channel_id, expected_user_chan_id);
 
-                       let tx = Transaction { version: chan_id as i32, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
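+                       // A coinbase-style input spends the null outpoint (all-zero txid, u32::MAX
+                       // index), which is what `bitcoin::OutPoint::null()` yields below.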
+                       let input = if coinbase {
+                               vec![TxIn {
+                                       previous_output: bitcoin::OutPoint::null(),
+                                       ..Default::default()
+                               }]
+                       } else {
+                               Vec::new()
+                       };
+
+                       let tx = Transaction { version: chan_id as i32, lock_time: PackedLockTime::ZERO, input, output: vec![TxOut {
                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                        }]};
                        let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
@@ -1025,7 +1046,8 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_
                _ => panic!("Unexpected event"),
        }
 }
-pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: [u8; 32]) -> Transaction {
+
+pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId) -> Transaction {
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42);
        assert_eq!(temporary_channel_id, expected_temporary_channel_id);
 
@@ -1068,7 +1090,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &
 }
 
 // Receiver must have been initialized with manually_accept_inbound_channels set to true.
-pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>) -> (bitcoin::Transaction, [u8; 32]) {
+pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>) -> (bitcoin::Transaction, ChannelId) {
        let initiator_channels = initiator.node.list_usable_channels().len();
        let receiver_channels = receiver.node.list_usable_channels().len();
 
@@ -1166,7 +1188,7 @@ pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_r
        node_recv.node.handle_channel_ready(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendChannelReady, node_recv.node.get_our_node_id()));
 }
 
-pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId) {
        let channel_id;
        let events_6 = node_conf.node.get_and_clear_pending_msg_events();
        assert_eq!(events_6.len(), 3);
@@ -1193,7 +1215,7 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv
        }), channel_id)
 }
 
-pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId) {
        let conf_height = core::cmp::max(node_a.best_block_info().1 + 1, node_b.best_block_info().1 + 1);
        create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height);
        confirm_transaction_at(node_a, tx, conf_height);
@@ -1202,7 +1224,7 @@ pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a:
        create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 }
 
-pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId, Transaction) {
        let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
        let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
        (msgs, chan_id, tx)
@@ -1242,11 +1264,11 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b,
        ((*announcement).clone(), as_update, bs_update)
 }
 
-pub fn create_announced_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_announced_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
 }
 
-pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
        update_nodes_with_chan_announce(nodes, a, b, &chan_announcement.0, &chan_announcement.1, &chan_announcement.2);
        (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
@@ -1478,7 +1500,7 @@ macro_rules! check_closed_event {
        }
 }
 
-pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
+pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &ChannelId, funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
        let (tx_a, tx_b);
@@ -1768,20 +1790,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '
        check_added_monitors!(node_a, 1);
 
        // If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
-       let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| {
-               let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate
-                       { channel_funding_outpoint, counterparty_node_id }) = action
-               {
-                       if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id {
-                               assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id());
-                               true
-                       } else { false }
-               } else { false };
-               if matching_action {
-                       if let Event::PaymentSent { .. } = ev {} else { panic!(); }
-               }
-               matching_action
-       });
+       let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id);
        if fail_backwards { assert!(!got_claim); }
        commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim);
 
@@ -1838,14 +1847,14 @@ macro_rules! get_payment_preimage_hash {
 }
 
 /// Gets a route from the given sender to the node described in `route_params`.
-pub fn get_route(send_node: &Node, payment_params: &PaymentParameters, recv_value: u64) -> Result<Route, msgs::LightningError> {
+pub fn get_route(send_node: &Node, route_params: &RouteParameters) -> Result<Route, msgs::LightningError> {
        let scorer = TestScorer::new();
        let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
        router::get_route(
-               &send_node.node.get_our_node_id(), payment_params, &send_node.network_graph.read_only(),
+               &send_node.node.get_our_node_id(), route_params, &send_node.network_graph.read_only(),
                Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-               recv_value, send_node.logger, &scorer, &(), &random_seed_bytes
+               send_node.logger, &scorer, &(), &random_seed_bytes
        )
 }
 
@@ -1854,9 +1863,10 @@ pub fn get_route(send_node: &Node, payment_params: &PaymentParameters, recv_valu
 /// Don't use this, use the identically-named function instead.
 #[macro_export]
 macro_rules! get_route {
-       ($send_node: expr, $payment_params: expr, $recv_value: expr) => {
-               $crate::ln::functional_test_utils::get_route(&$send_node, &$payment_params, $recv_value)
-       }
+       ($send_node: expr, $payment_params: expr, $recv_value: expr) => {{
+               let route_params = $crate::routing::router::RouteParameters::from_payment_params_and_value($payment_params, $recv_value);
+               $crate::ln::functional_test_utils::get_route(&$send_node, &route_params)
+       }}
 }
 
 #[cfg(test)]
@@ -1868,9 +1878,10 @@ macro_rules! get_route_and_payment_hash {
                $crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value)
        }};
        ($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr) => {{
+               let route_params = $crate::routing::router::RouteParameters::from_payment_params_and_value($payment_params, $recv_value);
                let (payment_preimage, payment_hash, payment_secret) =
                        $crate::ln::functional_test_utils::get_payment_preimage_hash(&$recv_node, Some($recv_value), None);
-               let route = $crate::ln::functional_test_utils::get_route(&$send_node, &$payment_params, $recv_value);
+               let route = $crate::ln::functional_test_utils::get_route(&$send_node, &route_params);
                (route.unwrap(), payment_hash, payment_preimage, payment_secret)
        }}
 }
@@ -1994,29 +2005,38 @@ macro_rules! expect_payment_path_successful {
        }
 }
 
+pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
+       event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
+       upstream_force_closed: bool, downstream_force_closed: bool
+) {
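+       // Asserts the event is a `PaymentForwarded` with the expected fee; the prev/next channels
+       // must still be listed unless the corresponding side was force closed, since force closed
+       // channels are removed from the node's channel list.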
+       match event {
+               Event::PaymentForwarded {
+                       fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
+                       outbound_amount_forwarded_msat: _
+               } => {
+                       assert_eq!(fee_earned_msat, expected_fee);
+                       if !upstream_force_closed {
+                               // Is the event prev_channel_id in one of the channels between the two nodes?
+                               assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
+                       }
+                       // We check for force closures since a force closed channel is removed from the
+                       // node's channel list
+                       if !downstream_force_closed {
+                               assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
+                       }
+                       assert_eq!(claim_from_onchain_tx, downstream_force_closed);
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
 macro_rules! expect_payment_forwarded {
        ($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
-               let events = $node.node.get_and_clear_pending_events();
+               let mut events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
-               match events[0] {
-                       Event::PaymentForwarded {
-                               fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
-                               outbound_amount_forwarded_msat: _
-                       } => {
-                               assert_eq!(fee_earned_msat, $expected_fee);
-                               if fee_earned_msat.is_some() {
-                                       // Is the event prev_channel_id in one of the channels between the two nodes?
-                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $prev_node.node.get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
-                               }
-                               // We check for force closures since a force closed channel is removed from the
-                               // node's channel list
-                               if !$downstream_force_closed {
-                                       assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
-                               }
-                               assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
-                       },
-                       _ => panic!("Unexpected event"),
-               }
+               $crate::ln::functional_test_utils::expect_payment_forwarded(
+                       events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee,
+                       $upstream_force_closed, $downstream_force_closed);
        }
 }
 
@@ -2386,14 +2406,14 @@ pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, '
                                                let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
                                                        .unwrap().lock().unwrap();
                                                let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
-                                               if let Some(prev_config) = channel.context.prev_config() {
+                                               if let Some(prev_config) = channel.context().prev_config() {
                                                        prev_config.forwarding_fee_base_msat
                                                } else {
-                                                       channel.context.config().forwarding_fee_base_msat
+                                                       channel.context().config().forwarding_fee_base_msat
                                                }
                                        };
                                        if $idx == 1 { fee += expected_extra_fees[i]; }
-                                       expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
+                                       expect_payment_forwarded!(*$node, $next_node, $prev_node, Some(fee as u64), false, false);
                                        expected_total_fee_msat += fee as u64;
                                        check_added_monitors!($node, 1);
                                        let new_next_msgs = if $new_msgs {
@@ -2461,10 +2481,11 @@ pub fn claim_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route:
 
 pub const TEST_FINAL_CLTV: u32 = 70;
 
-pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
+pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret, PaymentId) {
        let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(expected_route.last().unwrap().node.invoice_features()).unwrap();
-       let route = get_route(origin_node, &payment_params, recv_value).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
+       let route = get_route(origin_node, &route_params).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
@@ -2472,20 +2493,20 @@ pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route:
        }
 
        let res = send_along_route(origin_node, route, expected_route, recv_value);
-       (res.0, res.1, res.2)
+       (res.0, res.1, res.2, res.3)
 }
 
 pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64)  {
        let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(expected_route.last().unwrap().node.invoice_features()).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
        let network_graph = origin_node.network_graph.read_only();
        let scorer = test_utils::TestScorer::new();
        let seed = [0u8; 32];
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
-       let route = router::get_route(
-               &origin_node.node.get_our_node_id(), &payment_params, &network_graph,
-               None, recv_value, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route = router::get_route(&origin_node.node.get_our_node_id(), &route_params, &network_graph,
+               None, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
@@ -2499,7 +2520,7 @@ pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_rou
                assert!(err.contains("Cannot send value that would put us over the max HTLC value in flight our peer will accept")));
 }
 
-pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
+pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret, PaymentId) {
        let res = route_payment(&origin, expected_route, recv_value);
        claim_payment(&origin, expected_route, res.0);
        res
@@ -2634,7 +2655,7 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
                let persister = test_utils::TestPersister::new();
                let seed = [i as u8; 32];
                let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
 
                chan_mon_cfgs.push(TestChanMonCfg { tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager, scorer });
        }
@@ -2646,7 +2667,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMon
        create_node_cfgs_with_persisters(node_count, chanmon_cfgs, chanmon_cfgs.iter().map(|c| &c.persister).collect())
 }
 
-pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<EnforcingSigner>>) -> Vec<NodeCfg<'a>> {
+pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<TestChannelSigner>>) -> Vec<NodeCfg<'a>> {
        let mut nodes = Vec::new();
 
        for i in 0..node_count {
@@ -2766,7 +2787,7 @@ pub enum HTLCType { NONE, TIMEOUT, SUCCESS }
 ///
 /// All broadcast transactions must be accounted for in one of the above three types or we'll
 /// also fail.
-pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
+pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
        let mut txn_seen = HashSet::new();
        node_txn.retain(|tx| txn_seen.insert(tx.txid()));
@@ -2912,7 +2933,9 @@ macro_rules! get_channel_value_stat {
        ($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
                let peer_state_lock = $node.node.per_peer_state.read().unwrap();
                let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-               let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap();
+               let chan = chan_lock.channel_by_id.get(&$channel_id).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                chan.get_value_stat()
        }}
 }
@@ -3021,7 +3044,11 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
        pub node_a: &'a Node<'b, 'c, 'd>,
        pub node_b: &'a Node<'b, 'c, 'd>,
        pub send_channel_ready: (bool, bool),
-       pub pending_htlc_adds: (i64, i64),
+       pub pending_responding_commitment_signed: (bool, bool),
+       /// Indicates that the pending responding commitment_signed will be a duplicate for the
+       /// recipient, and thus no monitor update is expected.
+       pub pending_responding_commitment_signed_dup_monitor: (bool, bool),
+       pub pending_htlc_adds: (usize, usize),
        pub pending_htlc_claims: (usize, usize),
        pub pending_htlc_fails: (usize, usize),
        pub pending_cell_htlc_claims: (usize, usize),
@@ -3035,6 +3062,8 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
                        node_a,
                        node_b,
                        send_channel_ready: (false, false),
+                       pending_responding_commitment_signed: (false, false),
+                       pending_responding_commitment_signed_dup_monitor: (false, false),
                        pending_htlc_adds: (0, 0),
                        pending_htlc_claims: (0, 0),
                        pending_htlc_fails: (0, 0),
@@ -3050,7 +3079,8 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
 pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
        let ReconnectArgs {
                node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
-               pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
+               pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa,
+               pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor,
        } = args;
        node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
                features: node_b.node.init_features(), networks: None, remote_network_address: None
@@ -3135,13 +3165,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                } else {
                        assert!(chan_msgs.1.is_none());
                }
-               if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+               if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 ||
+                       pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 ||
+                       pending_responding_commitment_signed.0
+               {
                        let commitment_update = chan_msgs.2.unwrap();
-                       if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
-                               assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
-                       } else {
-                               assert!(commitment_update.update_add_htlcs.is_empty());
-                       }
+                       assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0);
                        assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
                        assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0 + pending_cell_htlc_fails.0);
                        assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
@@ -3155,7 +3184,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                                node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail);
                        }
 
-                       if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+                       if !pending_responding_commitment_signed.0 {
                                commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
                        } else {
                                node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed);
@@ -3164,7 +3193,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                                // No commitment_signed so get_event_msg's assert(len == 1) passes
                                node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack);
                                assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
-                               check_added_monitors!(node_b, 1);
+                               check_added_monitors!(node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 });
                        }
                } else {
                        assert!(chan_msgs.2.is_none());
@@ -3194,11 +3223,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                } else {
                        assert!(chan_msgs.1.is_none());
                }
-               if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+               if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 ||
+                       pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 ||
+                       pending_responding_commitment_signed.1
+               {
                        let commitment_update = chan_msgs.2.unwrap();
-                       if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
-                               assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
-                       }
+                       assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1);
                        assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
                        assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.1 + pending_cell_htlc_fails.1);
                        assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
@@ -3212,7 +3242,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                                node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
                        }
 
-                       if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+                       if !pending_responding_commitment_signed.1 {
                                commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
                        } else {
                                node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);
@@ -3221,7 +3251,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
                                // No commitment_signed so get_event_msg's assert(len == 1) passes
                                node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
                                assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
-                               check_added_monitors!(node_a, 1);
+                               check_added_monitors!(node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 });
                        }
                } else {
                        assert!(chan_msgs.2.is_none());
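For reference, a minimal sketch of how a caller sets the new flags instead of the old `-1` sentinel in `pending_htlc_adds`; the field names come from the hunks above, while the scenario itself is illustrative and assumes the usual functional-test harness:

    // Illustrative only: previously this intent was written as
    // `reconnect_args.pending_htlc_adds.0 = -1;` (-1 denoted a responding commitment_signed).
    let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
    reconnect_args.pending_responding_commitment_signed.0 = true;
    // Set when the responding commitment_signed was already persisted, so the
    // reconnect is expected to add no extra monitor update:
    reconnect_args.pending_responding_commitment_signed_dup_monitor.0 = true;
    reconnect_args.pending_raa.0 = true;
    reconnect_nodes(reconnect_args);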
index 2fbc36ce9a5b365d1b548f2fa5038722ab1db625..c0e33432611bd4aacc29f15b3affdea8fb1c7e98 100644 (file)
@@ -17,20 +17,20 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
-use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
-use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
+use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils::{self, WatchtowerPersister};
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -56,7 +56,7 @@ use alloc::collections::BTreeSet;
 use core::default::Default;
 use core::iter::repeat;
 use bitcoin::hashes::Hash;
-use crate::sync::{Arc, Mutex};
+use crate::sync::{Arc, Mutex, RwLock};
 
 use crate::ln::functional_test_utils::*;
 use crate::ln::chan_utils::CommitmentTransaction;
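The scorer here (and in test_key_derivation_params further down) switches from Mutex to RwLock. A minimal standalone sketch of the practical difference, assuming crate::sync::RwLock behaves like std::sync::RwLock in std builds:

    use std::sync::RwLock;

    fn main() {
        // Stand-in value for the scorer: RwLock allows several readers at once,
        // while a write (e.g. recording a payment result) needs exclusive access.
        let scorer = RwLock::new(0u32);
        {
            let r1 = scorer.read().unwrap();
            let r2 = scorer.read().unwrap(); // both read guards can coexist
            assert_eq!(*r1, *r2);
        }
        *scorer.write().unwrap() += 1;
        assert_eq!(*scorer.read().unwrap(), 1);
    }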
@@ -181,14 +181,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               if send_from_initiator {
-                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-                       chan.context.holder_selected_channel_reserve_satoshis = 0;
-                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
-               } else {
-                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-                       chan.context.holder_selected_channel_reserve_satoshis = 0;
-                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+
+               let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+               match channel_phase {
+                       ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+                               let chan_context = channel_phase.context_mut();
+                               chan_context.holder_selected_channel_reserve_satoshis = 0;
+                               chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+                       },
+                       ChannelPhase::Funded(_) => assert!(false),
                }
        }
 
@@ -696,12 +697,14 @@ fn test_update_fee_that_funder_cannot_afford() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
@@ -710,7 +713,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -725,7 +730,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
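The `.map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }).flatten()` chain above recurs at every channel_by_id lookup in this file; it simply means "use the channel only if it is in the Funded phase". For Option, map followed by flatten is equivalent to and_then, so the same lookup could be written as in the sketch below (shown for clarity only, not as part of the change):

    let local_chan = local_chan_lock.channel_by_id.get(&chan.2)
        .and_then(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None })
        .unwrap();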
@@ -1049,7 +1056,9 @@ fn fake_network_test() {
        });
        hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
        hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
-       let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
+       let payment_preimage_1 = send_along_route(&nodes[1],
+               Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
+                       &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
 
        let mut hops = Vec::with_capacity(3);
        hops.push(RouteHop {
@@ -1078,7 +1087,9 @@ fn fake_network_test() {
        });
        hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
        hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
-       let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
+       let payment_hash_2 = send_along_route(&nodes[1],
+               Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
+                       &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
 
        // Claim the rebalances...
        fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
@@ -1233,7 +1244,7 @@ fn duplicate_htlc_test() {
        create_announced_chan_between_nodes(&nodes, 3, 4);
        create_announced_chan_between_nodes(&nodes, 3, 5);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
 
        *nodes[0].network_payment_count.borrow_mut() -= 1;
        assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
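Throughout this file the `(a, b, _)` patterns on route_payment become `(a, b, ..)`: a rest pattern binds the leading elements and ignores however many values remain, so these call sites stay valid even if route_payment returns extra values. A minimal standalone illustration of the pattern itself:

    fn main() {
        // `..` in a tuple pattern matches any number of trailing elements (including zero).
        let returned = ("preimage", "hash", "secret", 42u64);
        let (preimage, hash, ..) = returned;
        assert_eq!((preimage, hash), ("preimage", "hash"));
    }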
@@ -1261,7 +1272,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
        // balancing
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
 
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
        let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
@@ -1409,12 +1420,14 @@ fn test_fee_spike_violation_fails_htlc() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                // Make the signer believe we validated another commitment, so we can release the secret
                chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
@@ -1428,7 +1441,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -1457,7 +1472,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        commitment_number,
@@ -1505,7 +1522,7 @@ fn test_fee_spike_violation_fails_htlc() {
                _ => panic!("Unexpected event"),
        };
        nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
-               format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
+               format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
 
        check_added_monitors!(nodes[1], 2);
 }
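The log assertion above now formats raa_msg.channel_id directly instead of hex-encoding it, which suggests the ChannelId type added to this file's imports implements Display with the same lowercase-hex output. A standalone sketch of that assumption (the type name below is hypothetical, not the crate's):

    use core::fmt;

    // Hypothetical stand-in for ChannelId: Display writes lowercase hex, so
    // `format!("{}", id)` matches the old `hex::encode(id)` output.
    struct ChannelIdSketch([u8; 32]);

    impl fmt::Display for ChannelIdSketch {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            for byte in self.0.iter() {
                write!(f, "{:02x}", byte)?;
            }
            Ok(())
        }
    }

    fn main() {
        let id = ChannelIdSketch([0xab; 32]);
        assert_eq!(format!("{}", id), "ab".repeat(32));
    }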
@@ -1535,7 +1552,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
        // Sending exactly enough to hit the reserve amount should be accepted
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
-               let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+               route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        // However one more HTLC should be significantly over the reserve amount and fail.
@@ -1565,7 +1582,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
-               let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+               route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        let (mut route, payment_hash, _, payment_secret) =
@@ -1626,11 +1643,11 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
        // commitment transaction fee.
-       let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
+       route_payment(&nodes[1], &[&nodes[0]], dust_amt);
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
-               let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+               route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        // One more than the dust amt should fail, however.
@@ -1691,22 +1708,22 @@ fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
 
        let payment_amt = 46000; // Dust amount
        // In the previous code, these first four payments would succeed.
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 
        // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 
        // And this last payment previously resulted in nodes[1] closing on its inbound-channel
        // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
        // transaction fee and therefore perceived this next payment as a channel reserve violation.
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 }
 
 #[test]
@@ -2092,8 +2109,8 @@ fn channel_reserve_in_flight_removes() {
        let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
        // Route the first two HTLCs.
        let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
 
        // Start routing the third HTLC (this is just used to get everyone in the right state).
        let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -2260,7 +2277,7 @@ fn channel_monitor_network_test() {
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
        // One pending HTLC is discarded by the force-close:
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
@@ -2331,7 +2348,7 @@ fn channel_monitor_network_test() {
        let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
 
        // One pending HTLC to time out:
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
        // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
        // buffer space).
 
@@ -2636,7 +2653,7 @@ fn claim_htlc_outputs_shared_tx() {
        send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
        let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
-       let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2703,7 +2720,7 @@ fn claim_htlc_outputs_single_tx() {
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
        // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
        let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
-       let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2806,8 +2823,8 @@ fn test_htlc_on_chain_success() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
-       let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
        // Broadcast legit commitment tx from C on B's chain
        // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
@@ -3028,7 +3045,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+       let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
 
        // Broadcast legit commitment tx from C on B's chain
        let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
@@ -3133,13 +3150,13 @@ fn test_simple_commitment_revoked_fail_backward() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+       let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
        // Get the will-be-revoked local txn from nodes[2]
        let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        // Revoke the old state
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
-       let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+       let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
@@ -3192,7 +3209,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
+       let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
        // Get the will-be-revoked local txn from nodes[2]
        let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
@@ -3203,12 +3220,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
                // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
                nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
-                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
+                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
        } else { 3000000 };
 
-       let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
-       let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
-       let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+       let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+       let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+       let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
        nodes[2].node.fail_htlc_backwards(&first_payment_hash);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
@@ -3643,7 +3660,7 @@ fn test_dup_events_on_peer_disconnect() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        nodes[1].node.claim_funds(payment_preimage);
        expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
@@ -3729,7 +3746,7 @@ fn test_simple_peer_disconnect() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
-       let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
+       let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
        let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3863,13 +3880,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        } else if messages_delivered == 3 {
                // nodes[0] still wants its RAA + commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_args.pending_responding_commitment_signed.0 = true;
                reconnect_args.pending_raa.0 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[0] still wants its commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_args.pending_responding_commitment_signed.0 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 5 {
                // nodes[1] still wants its final RAA
@@ -3997,13 +4014,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        } else if messages_delivered == 2 {
                // nodes[0] still wants its RAA + commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_args.pending_responding_commitment_signed.1 = true;
                reconnect_args.pending_raa.1 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 3 {
                // nodes[0] still wants its commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_args.pending_responding_commitment_signed.1 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[1] still wants its final RAA
@@ -4076,6 +4093,73 @@ fn test_channel_ready_without_best_block_updated() {
        nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
 }
 
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Let channel_manager get ahead of chain_monitor by 1 block.
+       // This emulates the race condition where a newly added channel_monitor skips processing one block,
+       // which happens when the client calls block_connected on channel_manager first and then on chain_monitor.
+       let height_1 = nodes[0].best_block_info().1 + 1;
+       let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+       nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+       nodes[0].node.block_connected(&block_1, height_1);
+
+       // Create channel, and it gets added to chain_monitor in funding_created.
+       let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+       // Now the newly added channel_monitor in chain_monitor hasn't processed block_1,
+       // but its best_block is block_1, since that was populated by channel_manager, which
+       // was running ahead of chain_monitor at the time of funding_created.
+       // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+       // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
+       confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+       connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+       // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+       let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Let chain_monitor get ahead of channel_manager by 1 block.
+       // This emulates the race condition where a newly added channel_monitor skips processing one block,
+       // which happens when the client calls block_connected on chain_monitor first and then on channel_manager.
+       let height_1 = nodes[0].best_block_info().1 + 1;
+       let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+       nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+       nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
+
+       // Create channel, and it gets added to chain_monitor in funding_created.
+       let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+       // channel_manager can't really skip block_1; it should get it eventually.
+       nodes[0].node.block_connected(&block_1, height_1);
+
+       // Now the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block is
+       // the block before block_1, since that was populated by channel_manager, which was
+       // running behind at the time of funding_created.
+       // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+       // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
+       confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+       connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+       // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+       let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
 #[test]
 fn test_drop_messages_peer_disconnect_dual_htlc() {
        // Test that we can handle reconnecting when both sides of a channel have pending
@@ -4086,7 +4170,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -4490,7 +4574,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
 
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(commitment_tx[0].input.len(), 1);
@@ -4540,7 +4624,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        // Rebalance the network a bit by relaying one payment through all the channels ...
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-       let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
 
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(commitment_tx[0].input.len(), 1);
@@ -4780,7 +4864,7 @@ fn test_onchain_to_onchain_claim() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
        let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.claim_funds(payment_preimage);
@@ -4893,7 +4977,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
        connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
 
-       let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
+       let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
 
        let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
        // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
@@ -5023,7 +5107,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
        let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
        assert_eq!(local_txn.len(), 1);
        assert_eq!(local_txn[0].input.len(), 1);
@@ -5101,20 +5185,20 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
        // 0th HTLC:
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
-       let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+       let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
        // 2nd HTLC:
        send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
        // 3rd HTLC:
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
        // 4th HTLC:
-       let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+       let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
        // 5th HTLC:
-       let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+       let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
        // 6th HTLC:
        send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
@@ -5122,13 +5206,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
 
        // 8th HTLC:
-       let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+       let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
        // 9th HTLC:
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
 
        // 10th HTLC:
-       let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+       let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 11th HTLC:
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
@@ -5382,7 +5466,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
        let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn[0].input.len(), 1);
        check_spends!(local_txn[0], chan_1.3);
@@ -5434,7 +5518,7 @@ fn test_key_derivation_params() {
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
        let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
-       let scorer = Mutex::new(test_utils::TestScorer::new());
+       let scorer = RwLock::new(test_utils::TestScorer::new());
        let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
        let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -5457,7 +5541,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
        connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
 
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
        let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
        let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn_1[0].input.len(), 1);
@@ -5543,7 +5627,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
 
        // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
        // present in B's local commitment transaction, but none of A's commitment transactions.
@@ -5616,7 +5700,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        // Also optionally test that we *don't* fail the channel in case the commitment transaction was
        // actually revoked.
        let htlc_value = if use_dust { 50000 } else { 3000000 };
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        nodes[1].node.fail_htlc_backwards(&our_payment_hash);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        check_added_monitors!(nodes[1], 1);
@@ -5835,7 +5919,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
        // us to surface its failure to the user.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5923,7 +6007,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        // to surface its failure to the user. The first payment should succeed.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -6211,7 +6295,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
        let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
 
        // Fetch a route in advance as we will be unable to once we're unable to send.
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -6284,7 +6368,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-               htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
+               htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
        }
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@@ -6588,7 +6672,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
@@ -6631,7 +6715,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
@@ -6887,11 +6971,11 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
        // We route 2 dust-HTLCs between A and B
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
-       let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        // Cache one local commitment tx as previous
@@ -6980,17 +7064,17 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
-       let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
-       let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
        let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
 
        // We revoked bs_commitment_tx
        if revoked {
-               let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+               let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
                claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
        }
 
@@ -7137,8 +7221,11 @@ fn test_check_htlc_underpaying() {
 
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
-       let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(),
+               TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
+               None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
        let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
        let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@ -7394,12 +7481,14 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
-               3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
+               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
        let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
        let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
-       let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
-               3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
+       let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
+               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
        send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
@@ -7545,7 +7634,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
 
        // Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
@@ -7639,7 +7728,7 @@ fn test_counterparty_raa_skip_no_crash() {
        // commitment transaction, we would have happily carried on and provided them the next
        // commitment transaction based on one RAA forward. This would probably eventually have led to
        // channel closure, but it would not have resulted in funds loss. Still, our
-       // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
+       // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
        // check simply that the channel is closed in response to such an RAA, but don't check whether
        // we decide to punish our counterparty for revoking their funds (as we don't currently
        // implement that).
@@ -7654,7 +7743,9 @@ fn test_counterparty_raa_skip_no_crash() {
        {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
+               let keys = guard.channel_by_id.get_mut(&channel_id).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap().get_signer();
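// A minimal illustrative sketch (not code from this patch) of the extraction pattern used
// just above: `ChannelPhase` is the enum this PR introduces over channel lifecycle stages,
// and only the `Funded` arm exposes a live channel. Note that `.map(..).flatten()` on an
// `Option` is equivalent to `.and_then(..)`.
enum Phase<C> { UnfundedOutboundV1(C), UnfundedInboundV1(C), Funded(C) }

fn funded_channel<C>(phase: Option<Phase<C>>) -> Option<C> {
    phase.map(|p| if let Phase::Funded(chan) = p { Some(chan) } else { None }).flatten()
}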
 
                const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
@@ -7697,8 +7788,8 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
        // Lock HTLC in both directions
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
-       let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
+       let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+       let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
@@ -8003,7 +8094,7 @@ fn test_can_not_accept_unknown_inbound_channel() {
        let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
        let nodes = create_network(2, &node_cfg, &node_chanmgr);
 
-       let unknown_channel_id = [0; 32];
+       let unknown_channel_id = ChannelId::new_zero();
        let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
        match api_res {
                Err(APIError::APIMisuseError { err }) => {
@@ -8341,7 +8432,7 @@ fn test_update_err_monitor_lockdown() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
 
        // Route a HTLC from node 0 to node 1 (but don't settle)
-       let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+       let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
 
        // Copy ChainMonitor to simulate a watchtower and update the block height of node 0 until its ChannelMonitor times out the HTLC onchain
        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
@@ -8350,7 +8441,7 @@ fn test_update_err_monitor_lockdown() {
        let watchtower = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8376,11 +8467,14 @@ fn test_update_err_monitor_lockdown() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
        // Our local monitor is in-sync and hasn't yet processed the timeout
        check_added_monitors!(nodes[0], 1);
@@ -8420,7 +8514,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_alice = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8451,7 +8545,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_bob = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8474,13 +8568,16 @@ fn test_concurrent_monitor_claim() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Watchtower Alice should already have seen the block and reject the update
-                       assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-                       assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               // Watchtower Alice should already have seen the block and reject the update
+                               assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+                               assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
        // Our local monitor is in-sync and hasn't yet processed the timeout
        check_added_monitors!(nodes[0], 1);
@@ -8571,7 +8668,7 @@ fn test_htlc_no_detection() {
        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
        let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn[0].input.len(), 1);
        assert_eq!(local_txn[0].output.len(), 3);
@@ -8627,7 +8724,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 
        // Steps (1) and (2):
        // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
-       let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
        // Check that Alice's commitment transaction now contains an output for this HTLC.
        let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
@@ -8680,7 +8777,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
 
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
+       let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
        // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
        if !go_onchain_before_fulfill && broadcast_alice {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -8931,9 +9029,13 @@ fn test_duplicate_chan_id() {
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
-               let logger = test_utils::TestLogger::new();
-               as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                               let logger = test_utils::TestLogger::new();
+                               chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+                       },
+                       _ => panic!("Unexpected ChannelPhase variant"),
+               }
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -9017,7 +9119,7 @@ fn test_error_chans_closed() {
 
        // A null channel ID should close all channels
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
-       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
                [nodes[1].node.get_our_node_id(); 2], 100000);
@@ -9133,6 +9235,66 @@ fn test_invalid_funding_tx() {
        mine_transaction(&nodes[1], &spend_tx);
 }
 
+#[test]
+fn test_coinbase_funding_tx() {
+       // Miners are able to fund channels directly from coinbase transactions; however, by
+       // consensus rules, outputs of a coinbase transaction are encumbered by a 100-block
+       // maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
+       // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
+       //
+       // Note that 0conf channels with coinbase funding transactions are unaffected and are
+       // immediately operational after opening.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
+       let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+
+       // Create the coinbase funding transaction.
+       let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+       check_added_monitors!(nodes[0], 0);
+       let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+       // Starting at height 0, we "confirm" the coinbase at height 1.
+       confirm_transaction_at(&nodes[0], &tx, 1);
+       // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
+       connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
+       // Check that we have no pending message events (we have not queued a `channel_ready` yet).
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       // Now connect one more block which results in 100 confirmations of the coinbase transaction.
+       connect_blocks(&nodes[0], 1);
+       // There should now be a `channel_ready` which can be handled.
+       let _ = &nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
+
+       confirm_transaction_at(&nodes[1], &tx, 1);
+       connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       connect_blocks(&nodes[1], 1);
+       expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+       create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+}
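// A short sketch of the maturity rule the test above exercises: coinbase outputs are
// consensus-locked for 100 blocks, so a coinbase-funded (non-0conf) channel should never
// require fewer confirmations than that. The helper below is hypothetical, not LDK API.
const COINBASE_MATURITY: u32 = 100;

fn required_funding_confirmations(configured_minimum_depth: u32, funding_is_coinbase: bool) -> u32 {
    if funding_is_coinbase {
        // Never require less than the coinbase maturity for a coinbase funding output.
        configured_minimum_depth.max(COINBASE_MATURITY)
    } else {
        configured_minimum_depth
    }
}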
+
 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
        // In the first version of the chain::Confirm interface, after a refactor was made to not
        // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
@@ -9158,7 +9320,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
-       let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
@@ -9540,8 +9702,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
-               chan.context.holder_dust_limit_satoshis = 546;
+               match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
+                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                               chan.context.holder_dust_limit_satoshis = 546;
+                       },
+                       _ => panic!("Unexpected ChannelPhase variant"),
+               }
        }
 
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@@ -9565,8 +9731,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               (chan.context.get_dust_buffer_feerate(None) as u64,
-               chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+               (chan.context().get_dust_buffer_feerate(None) as u64,
+               chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
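// Purely illustrative arithmetic for the threshold computed above; the example inputs are
// assumptions, not LDK constants. An outbound HTLC counts as dust on the holder commitment
// when its value falls below the dust limit plus the HTLC-timeout fee at the buffered feerate.
fn dust_outbound_threshold_msat(dust_buffer_feerate: u64, htlc_timeout_tx_weight: u64, dust_limit_sats: u64) -> u64 {
    (dust_buffer_feerate * htlc_timeout_tx_weight / 1000 + dust_limit_sats - 1) * 1000
}
// e.g. dust_outbound_threshold_msat(2530, 663, 546) == (1677 + 545) * 1000 == 2_222_000 msat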
@@ -10025,7 +10191,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
        let check_outbound_channel_existence = |should_exist: bool| {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+               assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
        };
 
        // Channel should exist without any timer ticks.
@@ -10076,7 +10242,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
        let check_inbound_channel_existence = |should_exist: bool| {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+               assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
        };
 
        // Channel should exist without any timer ticks.
@@ -10124,8 +10290,8 @@ fn do_test_multi_post_event_actions(do_reload: bool) {
        send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
        send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
index dda7cc2b29a125d214e022b18511e4cfc78fd031..f9e10880afbe5f5eae122d48220f5b8e5dcd9757 100644 (file)
@@ -19,7 +19,7 @@ use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::msgs;
 use crate::ln::msgs::MAX_VALUE_MSAT;
 use crate::util::chacha20::ChaCha20;
-use crate::util::crypto::hkdf_extract_expand_4x;
+use crate::util::crypto::hkdf_extract_expand_5x;
 use crate::util::errors::APIError;
 use crate::util::logger::Logger;
 
@@ -50,6 +50,8 @@ pub struct ExpandedKey {
        user_pmt_hash_key: [u8; 32],
        /// The base key used to derive signing keys and authenticate messages for BOLT 12 Offers.
        offers_base_key: [u8; 32],
+       /// The key used to encrypt message metadata for BOLT 12 Offers.
+       offers_encryption_key: [u8; 32],
 }
 
 impl ExpandedKey {
@@ -57,20 +59,25 @@ impl ExpandedKey {
        ///
        /// It is recommended to cache this value and not regenerate it for each new inbound payment.
        pub fn new(key_material: &KeyMaterial) -> ExpandedKey {
-               let (metadata_key, ldk_pmt_hash_key, user_pmt_hash_key, offers_base_key) =
-                       hkdf_extract_expand_4x(b"LDK Inbound Payment Key Expansion", &key_material.0);
+               let (
+                       metadata_key,
+                       ldk_pmt_hash_key,
+                       user_pmt_hash_key,
+                       offers_base_key,
+                       offers_encryption_key,
+               ) = hkdf_extract_expand_5x(b"LDK Inbound Payment Key Expansion", &key_material.0);
                Self {
                        metadata_key,
                        ldk_pmt_hash_key,
                        user_pmt_hash_key,
                        offers_base_key,
+                       offers_encryption_key,
                }
        }
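// A rough structural sketch (not LDK's util::crypto implementation) of HKDF-style expansion
// into several 32-byte keys, of which `new` above now derives five. `prf` stands in for
// HMAC-SHA256 keyed with the extracted pseudorandom key.
fn expand_keys<const N: usize>(prf: impl Fn(&[u8]) -> [u8; 32], info: &[u8]) -> [[u8; 32]; N] {
    let mut out = [[0u8; 32]; N];
    let mut prev: Vec<u8> = Vec::new();
    for i in 0..N {
        // T(i) = PRF(T(i-1) || info || counter), with the counter starting at 1.
        let mut input = prev.clone();
        input.extend_from_slice(info);
        input.push((i + 1) as u8);
        out[i] = prf(&input);
        prev = out[i].to_vec();
    }
    out
}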
 
        /// Returns an [`HmacEngine`] used to construct [`Offer::metadata`].
        ///
        /// [`Offer::metadata`]: crate::offers::offer::Offer::metadata
-       #[allow(unused)]
        pub(crate) fn hmac_for_offer(
                &self, nonce: Nonce, iv_bytes: &[u8; IV_LEN]
        ) -> HmacEngine<Sha256> {
@@ -79,6 +86,13 @@ impl ExpandedKey {
                hmac.input(&nonce.0);
                hmac
        }
+
+       /// Encrypts or decrypts the given `bytes`. Used for data included in an offer message's
+       /// metadata (e.g., payment id).
+       pub(crate) fn crypt_for_offer(&self, mut bytes: [u8; 32], nonce: Nonce) -> [u8; 32] {
+               ChaCha20::encrypt_single_block_in_place(&self.offers_encryption_key, &nonce.0, &mut bytes);
+               bytes
+       }
 }
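// A minimal sketch of why a single `crypt_for_offer` serves as both encrypt and decrypt:
// ChaCha20 used as a stream cipher XORs the input with a keystream derived from (key, nonce),
// so applying the same operation twice returns the original bytes. The keystream argument
// below is an assumed stand-in for ChaCha20's output.
fn xor_with_keystream(bytes: &mut [u8; 32], keystream: &[u8; 32]) {
    for (b, k) in bytes.iter_mut().zip(keystream.iter()) {
        *b ^= *k;
    }
}

fn roundtrip(msg: [u8; 32], keystream: [u8; 32]) -> [u8; 32] {
    let mut buf = msg;
    xor_with_keystream(&mut buf, &keystream); // "encrypt"
    xor_with_keystream(&mut buf, &keystream); // "decrypt": the very same call
    buf // equal to msg again
}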
 
 /// A 128-bit number used only once.
@@ -88,7 +102,6 @@ impl ExpandedKey {
 ///
 /// [`Offer::metadata`]: crate::offers::offer::Offer::metadata
 /// [`Offer::signing_pubkey`]: crate::offers::offer::Offer::signing_pubkey
-#[allow(unused)]
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub(crate) struct Nonce(pub(crate) [u8; Self::LENGTH]);
 
@@ -271,10 +284,9 @@ fn construct_payment_secret(iv_bytes: &[u8; IV_LEN], metadata_bytes: &[u8; METAD
        let (iv_slice, encrypted_metadata_slice) = payment_secret_bytes.split_at_mut(IV_LEN);
        iv_slice.copy_from_slice(iv_bytes);
 
-       let chacha_block = ChaCha20::get_single_block(metadata_key, iv_bytes);
-       for i in 0..METADATA_LEN {
-               encrypted_metadata_slice[i] = chacha_block[i] ^ metadata_bytes[i];
-       }
+       ChaCha20::encrypt_single_block(
+               metadata_key, iv_bytes, encrypted_metadata_slice, metadata_bytes
+       );
        PaymentSecret(payment_secret_bytes)
 }
 
@@ -406,11 +418,10 @@ fn decrypt_metadata(payment_secret: PaymentSecret, keys: &ExpandedKey) -> ([u8;
        let (iv_slice, encrypted_metadata_bytes) = payment_secret.0.split_at(IV_LEN);
        iv_bytes.copy_from_slice(iv_slice);
 
-       let chacha_block = ChaCha20::get_single_block(&keys.metadata_key, &iv_bytes);
        let mut metadata_bytes: [u8; METADATA_LEN] = [0; METADATA_LEN];
-       for i in 0..METADATA_LEN {
-               metadata_bytes[i] = chacha_block[i] ^ encrypted_metadata_bytes[i];
-       }
+       ChaCha20::encrypt_single_block(
+               &keys.metadata_key, &iv_bytes, &mut metadata_bytes, encrypted_metadata_bytes
+       );
 
        (iv_bytes, metadata_bytes)
 }
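// A sketch of the payment-secret layout shared by `construct_payment_secret` and
// `decrypt_metadata` above (lengths assumed to mirror IV_LEN and METADATA_LEN in this file):
// the first half carries the IV in the clear, the second half the ChaCha20-encrypted metadata.
const SKETCH_IV_LEN: usize = 16;
const SKETCH_METADATA_LEN: usize = 16;

fn split_payment_secret(secret: &[u8; SKETCH_IV_LEN + SKETCH_METADATA_LEN]) -> ([u8; SKETCH_IV_LEN], [u8; SKETCH_METADATA_LEN]) {
    let mut iv = [0u8; SKETCH_IV_LEN];
    let mut encrypted_metadata = [0u8; SKETCH_METADATA_LEN];
    iv.copy_from_slice(&secret[..SKETCH_IV_LEN]);
    encrypted_metadata.copy_from_slice(&secret[SKETCH_IV_LEN..]);
    (iv, encrypted_metadata)
}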
index a5de0d4c2f5d77a70d254cca647808c77563ac50..beefd2d463b3ed2b938c6f82fdabef63757adccd 100644 (file)
@@ -20,6 +20,7 @@ pub mod peer_handler;
 pub mod chan_utils;
 pub mod features;
 pub mod script;
+mod channel_id;
 
 #[cfg(fuzzing)]
 pub mod peer_channel_encryptor;
@@ -31,6 +32,9 @@ pub mod channel;
 #[cfg(not(fuzzing))]
 pub(crate) mod channel;
 
+// Re-export ChannelId
+pub use channel_id::ChannelId;
+
 pub(crate) mod onion_utils;
 mod outbound_payment;
 pub mod wire;
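// A minimal sketch (an assumption, not the contents of the new channel_id.rs) of the kind of
// newtype re-exported above; the real type added by this PR likely carries more constructors,
// serialization impls, and documentation.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct ChannelId(pub [u8; 32]);

impl ChannelId {
    /// The all-zeros ID which `error`/`warning` messages use to refer to all channels.
    pub fn new_zero() -> Self {
        ChannelId([0; 32])
    }
}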
@@ -39,6 +43,9 @@ pub mod wire;
 // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain
 // about an unnecessary mut. Thus, we silence the unused_mut warning in the test modules below.
 
+#[cfg(test)]
+#[allow(unused_mut)]
+mod blinded_payment_tests;
 #[cfg(test)]
 #[allow(unused_mut)]
 mod functional_tests;
index 728752f97c310bc478abc213d165fc347a2db407..ece24794f63f593ecba133660fa73a466fc547a7 100644 (file)
@@ -269,12 +269,12 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 
        // This HTLC is immediately claimed, giving node B the preimage
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
        // This HTLC is allowed to time out, letting A claim it. However, in order to test claimable
        // balances more fully we also give B the preimage for this HTLC.
-       let (timeout_payment_preimage, timeout_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 4_000_000);
+       let (timeout_payment_preimage, timeout_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 4_000_000);
        // This HTLC will be dust, and not be claimable at all:
-       let (dust_payment_preimage, dust_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000);
+       let (dust_payment_preimage, dust_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000);
 
        let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
 
@@ -1871,7 +1871,7 @@ fn test_yield_anchors_events() {
                &nodes, 0, 1, 1_000_000, 500_000_000
        ).2;
        route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
 
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 
@@ -2191,7 +2191,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 
        // Alice should see that Bob is trying to claim two HTLCs, so she should now try to claim them at
        // the second level instead.
-       let revoked_claims = {
+       let revoked_claim_transactions = {
                let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(txn.len(), 2);
 
@@ -2205,10 +2205,14 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        check_spends!(revoked_htlc_claim, htlc_tx);
                }
 
-               txn
+               let mut revoked_claim_transaction_map = HashMap::new();
+               for current_tx in txn.into_iter() {
+                       revoked_claim_transaction_map.insert(current_tx.txid(), current_tx);
+               }
+               revoked_claim_transaction_map
        };
        for node in &nodes {
-               mine_transactions(node, &revoked_claims.iter().collect::<Vec<_>>());
+               mine_transactions(node, &revoked_claim_transactions.values().collect::<Vec<_>>());
        }
 
 
@@ -2227,14 +2231,15 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
        assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
        let spendable_output_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
        assert_eq!(spendable_output_events.len(), 2);
-       for (idx, event) in spendable_output_events.iter().enumerate() {
+       for event in spendable_output_events.iter() {
                if let Event::SpendableOutputs { outputs, channel_id } = event {
                        assert_eq!(outputs.len(), 1);
                        assert!(vec![chan_b.2, chan_a.2].contains(&channel_id.unwrap()));
                        let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(
                                &[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, None, &Secp256k1::new(),
                        ).unwrap();
-                       check_spends!(spend_tx, revoked_claims[idx]);
+
+                       check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
                } else {
                        panic!("unexpected event");
                }
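// A short sketch of the lookup pattern the change above enables: indexing the broadcast claims
// by txid lets a later spend be matched to its parent regardless of broadcast order, rather
// than relying on event ordering. Uses `bitcoin` crate types as in these tests.
use std::collections::HashMap;
use bitcoin::{Transaction, Txid};

fn index_by_txid(txn: Vec<Transaction>) -> HashMap<Txid, Transaction> {
    txn.into_iter().map(|tx| (tx.txid(), tx)).collect()
}

fn parent_of<'a>(spend: &Transaction, index: &'a HashMap<Txid, Transaction>) -> Option<&'a Transaction> {
    index.get(&spend.input[0].previous_output.txid)
}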
index dc8d9215438e09a4eb505a4106f15083003519bc..c617d97fe52705c8673c6f4a68f331bb8ed644a6 100644 (file)
@@ -31,21 +31,27 @@ use bitcoin::{secp256k1, Witness};
 use bitcoin::blockdata::script::Script;
 use bitcoin::hash_types::{Txid, BlockHash};
 
+use crate::blinded_path::payment::ReceiveTlvs;
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 use crate::ln::onion_utils;
 use crate::onion_message;
+use crate::sign::{NodeSigner, Recipient};
 
 use crate::prelude::*;
+use core::convert::TryFrom;
 use core::fmt;
 use core::fmt::Debug;
-use crate::io::{self, Read};
+use core::ops::Deref;
+use core::str::FromStr;
+use crate::io::{self, Cursor, Read};
 use crate::io_extras::read_to_end;
 
 use crate::events::{MessageSendEventsProvider, OnionMessageProvider};
+use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
 use crate::util::logger;
-use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
-
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::util::ser::{LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
+use crate::util::base32;
 
 use crate::routing::gossip::{NodeAlias, NodeId};
 
@@ -99,7 +105,7 @@ pub struct Init {
        /// message. A node can decide to use that information to discover a potential update to its
        /// public IPv4 address (NAT) and use that for a [`NodeAnnouncement`] update message containing
        /// the new address.
-       pub remote_network_address: Option<NetAddress>,
+       pub remote_network_address: Option<SocketAddress>,
 }
 
 /// An [`error`] message to be sent to or received from a peer.
@@ -111,7 +117,7 @@ pub struct ErrorMessage {
        ///
        /// All-0s indicates a general error unrelated to a specific channel, after which all channels
        /// with the sending peer should be closed.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A possibly human-readable error description.
        ///
        /// The string should be sanitized before it is used (e.g., emitted to logs or printed to
@@ -128,7 +134,7 @@ pub struct WarningMessage {
        /// The channel ID involved in the warning.
        ///
        /// All-0s indicates a warning unrelated to a specific channel.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A possibly human-readable warning description.
        ///
        /// The string should be sanitized before it is used (e.g. emitted to logs or printed to
@@ -171,7 +177,7 @@ pub struct OpenChannel {
        /// The genesis hash of the blockchain where the channel is to be opened
        pub chain_hash: BlockHash,
        /// A temporary channel ID, until the funding outpoint is announced
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The channel value
        pub funding_satoshis: u64,
        /// The amount to push to the counterparty as part of the open, in milli-satoshi
@@ -225,7 +231,7 @@ pub struct OpenChannelV2 {
        /// The genesis hash of the blockchain where the channel is to be opened
        pub chain_hash: BlockHash,
        /// A temporary channel ID derived using a zeroed out value for the channel acceptor's revocation basepoint
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The feerate for the funding transaction set by the channel initiator
        pub funding_feerate_sat_per_1000_weight: u32,
        /// The feerate for the commitment transaction set by the channel initiator
@@ -282,7 +288,7 @@ pub struct OpenChannelV2 {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AcceptChannel {
        /// A temporary channel ID, until the funding outpoint is announced
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The threshold below which outputs on transactions broadcast by sender will be omitted
        pub dust_limit_satoshis: u64,
        /// The maximum inbound HTLC value in flight towards sender, in milli-satoshi
@@ -330,7 +336,7 @@ pub struct AcceptChannel {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AcceptChannelV2 {
        /// The same `temporary_channel_id` received from the initiator's `open_channel2` message.
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// Part of the channel value contributed by the channel acceptor
        pub funding_satoshis: u64,
        /// The threshold below which outputs on transactions broadcast by the channel acceptor will be
@@ -383,7 +389,7 @@ pub struct AcceptChannelV2 {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FundingCreated {
        /// A temporary channel ID, until the funding is established
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The funding transaction ID
        pub funding_txid: Txid,
        /// The specific output index funding this channel
@@ -406,7 +412,7 @@ pub struct FundingCreated {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FundingSigned {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The signature of the channel acceptor (fundee) on the initial commitment transaction
        pub signature: Signature,
        #[cfg(taproot)]
@@ -420,7 +426,7 @@ pub struct FundingSigned {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelReady {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The per-commitment point of the second commitment transaction
        pub next_per_commitment_point: PublicKey,
        /// If set, provides a `short_channel_id` alias for this channel.
@@ -436,7 +442,7 @@ pub struct ChannelReady {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAddInput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A randomly chosen unique identifier for this input, which is even for initiators and odd for
        /// non-initiators.
        pub serial_id: u64,
@@ -455,7 +461,7 @@ pub struct TxAddInput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAddOutput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A randomly chosen unique identifier for this output, which is even for initiators and odd for
        /// non-initiators.
        pub serial_id: u64,
@@ -471,7 +477,7 @@ pub struct TxAddOutput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxRemoveInput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The serial ID of the input to be removed
        pub serial_id: u64,
 }
@@ -482,7 +488,7 @@ pub struct TxRemoveInput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxRemoveOutput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The serial ID of the output to be removed
        pub serial_id: u64,
 }
@@ -494,7 +500,7 @@ pub struct TxRemoveOutput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxComplete {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
 }
 
 /// A tx_signatures message containing the sender's signatures for a transaction constructed with
@@ -504,7 +510,7 @@ pub struct TxComplete {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxSignatures {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The TXID
        pub tx_hash: Txid,
        /// The list of witnesses
@@ -518,7 +524,7 @@ pub struct TxSignatures {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxInitRbf {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The locktime of the transaction
        pub locktime: u32,
        /// The feerate of the transaction
@@ -535,7 +541,7 @@ pub struct TxInitRbf {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAckRbf {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The number of satoshis the sender will contribute to or, if negative, remove from
        /// (e.g. splice-out) the funding output of the transaction
        pub funding_output_contribution: Option<i64>,
@@ -547,7 +553,7 @@ pub struct TxAckRbf {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAbort {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// Message data
        pub data: Vec<u8>,
 }
@@ -558,7 +564,7 @@ pub struct TxAbort {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Shutdown {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The destination of this peer's funds on closing.
        ///
        /// Must be in one of these forms: P2PKH, P2SH, P2WPKH, P2WSH, P2TR.
@@ -585,7 +591,7 @@ pub struct ClosingSignedFeeRange {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClosingSigned {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The proposed total fee for the closing transaction
        pub fee_satoshis: u64,
        /// A signature on the closing transaction
@@ -601,7 +607,7 @@ pub struct ClosingSigned {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateAddHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        /// The HTLC value in milli-satoshi
@@ -634,7 +640,7 @@ pub struct OnionMessage {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFulfillHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        /// The pre-image of the payment hash, allowing HTLC redemption
@@ -647,7 +653,7 @@ pub struct UpdateFulfillHTLC {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFailHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        pub(crate) reason: OnionErrorPacket,
@@ -659,7 +665,7 @@ pub struct UpdateFailHTLC {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFailMalformedHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        pub(crate) sha256_of_onion: [u8; 32],
@@ -673,7 +679,7 @@ pub struct UpdateFailMalformedHTLC {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct CommitmentSigned {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A signature on the commitment transaction
        pub signature: Signature,
        /// Signatures on the HTLC transactions
@@ -689,7 +695,7 @@ pub struct CommitmentSigned {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct RevokeAndACK {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The secret corresponding to the per-commitment point
        pub per_commitment_secret: [u8; 32],
        /// The next sender-broadcast commitment transaction's per-commitment point
@@ -705,7 +711,7 @@ pub struct RevokeAndACK {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFee {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// Fee rate per 1000-weight of the transaction
        pub feerate_per_kw: u32,
 }
@@ -716,7 +722,7 @@ pub struct UpdateFee {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelReestablish {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The next commitment number for the sender
        pub next_local_commitment_number: u64,
        /// The next commitment number for the recipient
@@ -736,7 +742,7 @@ pub struct ChannelReestablish {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AnnouncementSignatures {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The short channel ID
        pub short_channel_id: u64,
        /// A signature by the node key
@@ -747,16 +753,16 @@ pub struct AnnouncementSignatures {
 
 /// An address which can be used to connect to a remote peer.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub enum NetAddress {
-       /// An IPv4 address/port on which the peer is listening.
-       IPv4 {
+pub enum SocketAddress {
+       /// An IPv4 address and port on which the peer is listening.
+       TcpIpV4 {
                /// The 4-byte IPv4 address
                addr: [u8; 4],
                /// The port on which the node is listening
                port: u16,
        },
-       /// An IPv6 address/port on which the peer is listening.
-       IPv6 {
+       /// An IPv6 address and port on which the peer is listening.
+       TcpIpV6 {
                /// The 16-byte IPv6 address
                addr: [u8; 16],
                /// The port on which the node is listening
@@ -789,28 +795,28 @@ pub enum NetAddress {
                port: u16,
        },
 }
-impl NetAddress {
+impl SocketAddress {
        /// Gets the ID of this address type. Addresses in [`NodeAnnouncement`] messages should be sorted
        /// by this.
        pub(crate) fn get_id(&self) -> u8 {
                match self {
-                       &NetAddress::IPv4 {..} => { 1 },
-                       &NetAddress::IPv6 {..} => { 2 },
-                       &NetAddress::OnionV2(_) => { 3 },
-                       &NetAddress::OnionV3 {..} => { 4 },
-                       &NetAddress::Hostname {..} => { 5 },
+                       &SocketAddress::TcpIpV4 {..} => { 1 },
+                       &SocketAddress::TcpIpV6 {..} => { 2 },
+                       &SocketAddress::OnionV2(_) => { 3 },
+                       &SocketAddress::OnionV3 {..} => { 4 },
+                       &SocketAddress::Hostname {..} => { 5 },
                }
        }
 
        /// Strict byte-length of address descriptor, 1-byte type not recorded
        fn len(&self) -> u16 {
                match self {
-                       &NetAddress::IPv4 { .. } => { 6 },
-                       &NetAddress::IPv6 { .. } => { 18 },
-                       &NetAddress::OnionV2(_) => { 12 },
-                       &NetAddress::OnionV3 { .. } => { 37 },
+                       &SocketAddress::TcpIpV4 { .. } => { 6 },
+                       &SocketAddress::TcpIpV6 { .. } => { 18 },
+                       &SocketAddress::OnionV2(_) => { 12 },
+                       &SocketAddress::OnionV3 { .. } => { 37 },
                        // Consists of 1-byte hostname length, hostname bytes, and 2-byte port.
-                       &NetAddress::Hostname { ref hostname, .. } => { u16::from(hostname.len()) + 3 },
+                       &SocketAddress::Hostname { ref hostname, .. } => { u16::from(hostname.len()) + 3 },
                }
        }
 
@@ -820,31 +826,31 @@ impl NetAddress {
        pub(crate) const MAX_LEN: u16 = 258;
 }
 
-impl Writeable for NetAddress {
+impl Writeable for SocketAddress {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self {
-                       &NetAddress::IPv4 { ref addr, ref port } => {
+                       &SocketAddress::TcpIpV4 { ref addr, ref port } => {
                                1u8.write(writer)?;
                                addr.write(writer)?;
                                port.write(writer)?;
                        },
-                       &NetAddress::IPv6 { ref addr, ref port } => {
+                       &SocketAddress::TcpIpV6 { ref addr, ref port } => {
                                2u8.write(writer)?;
                                addr.write(writer)?;
                                port.write(writer)?;
                        },
-                       &NetAddress::OnionV2(bytes) => {
+                       &SocketAddress::OnionV2(bytes) => {
                                3u8.write(writer)?;
                                bytes.write(writer)?;
                        },
-                       &NetAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
+                       &SocketAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
                                4u8.write(writer)?;
                                ed25519_pubkey.write(writer)?;
                                checksum.write(writer)?;
                                version.write(writer)?;
                                port.write(writer)?;
                        },
-                       &NetAddress::Hostname { ref hostname, ref port } => {
+                       &SocketAddress::Hostname { ref hostname, ref port } => {
                                5u8.write(writer)?;
                                hostname.write(writer)?;
                                port.write(writer)?;
@@ -854,25 +860,25 @@ impl Writeable for NetAddress {
        }
 }
 
-impl Readable for Result<NetAddress, u8> {
-       fn read<R: Read>(reader: &mut R) -> Result<Result<NetAddress, u8>, DecodeError> {
+impl Readable for Result<SocketAddress, u8> {
+       fn read<R: Read>(reader: &mut R) -> Result<Result<SocketAddress, u8>, DecodeError> {
                let byte = <u8 as Readable>::read(reader)?;
                match byte {
                        1 => {
-                               Ok(Ok(NetAddress::IPv4 {
+                               Ok(Ok(SocketAddress::TcpIpV4 {
                                        addr: Readable::read(reader)?,
                                        port: Readable::read(reader)?,
                                }))
                        },
                        2 => {
-                               Ok(Ok(NetAddress::IPv6 {
+                               Ok(Ok(SocketAddress::TcpIpV6 {
                                        addr: Readable::read(reader)?,
                                        port: Readable::read(reader)?,
                                }))
                        },
-                       3 => Ok(Ok(NetAddress::OnionV2(Readable::read(reader)?))),
+                       3 => Ok(Ok(SocketAddress::OnionV2(Readable::read(reader)?))),
                        4 => {
-                               Ok(Ok(NetAddress::OnionV3 {
+                               Ok(Ok(SocketAddress::OnionV3 {
                                        ed25519_pubkey: Readable::read(reader)?,
                                        checksum: Readable::read(reader)?,
                                        version: Readable::read(reader)?,
@@ -880,7 +886,7 @@ impl Readable for Result<NetAddress, u8> {
                                }))
                        },
                        5 => {
-                               Ok(Ok(NetAddress::Hostname {
+                               Ok(Ok(SocketAddress::Hostname {
                                        hostname: Readable::read(reader)?,
                                        port: Readable::read(reader)?,
                                }))
@@ -890,8 +896,8 @@ impl Readable for Result<NetAddress, u8> {
        }
 }
 
-impl Readable for NetAddress {
-       fn read<R: Read>(reader: &mut R) -> Result<NetAddress, DecodeError> {
+impl Readable for SocketAddress {
+       fn read<R: Read>(reader: &mut R) -> Result<SocketAddress, DecodeError> {
                match Readable::read(reader) {
                        Ok(Ok(res)) => Ok(res),
                        Ok(Err(_)) => Err(DecodeError::UnknownVersion),
@@ -900,6 +906,104 @@ impl Readable for NetAddress {
        }
 }
 
+/// [`SocketAddress`] error variants
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub enum SocketAddressParseError {
+       /// Socket address (IPv4/IPv6) parsing error
+       SocketAddrParse,
+       /// Invalid input format
+       InvalidInput,
+       /// Invalid port
+       InvalidPort,
+       /// Invalid onion v3 address
+       InvalidOnionV3,
+}
+
+impl fmt::Display for SocketAddressParseError {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               match self {
+                       SocketAddressParseError::SocketAddrParse => write!(f, "Socket address (IPv4/IPv6) parsing error"),
+                       SocketAddressParseError::InvalidInput => write!(f, "Invalid input format. \
+                               Expected: \"<ipv4>:<port>\", \"[<ipv6>]:<port>\", \"<onion address>.onion:<port>\" or \"<hostname>:<port>\""),
+                       SocketAddressParseError::InvalidPort => write!(f, "Invalid port"),
+                       SocketAddressParseError::InvalidOnionV3 => write!(f, "Invalid onion v3 address"),
+               }
+       }
+}
+
+#[cfg(feature = "std")]
+impl From<std::net::SocketAddrV4> for SocketAddress {
+               fn from(addr: std::net::SocketAddrV4) -> Self {
+                       SocketAddress::TcpIpV4 { addr: addr.ip().octets(), port: addr.port() }
+               }
+}
+
+#[cfg(feature = "std")]
+impl From<std::net::SocketAddrV6> for SocketAddress {
+               fn from(addr: std::net::SocketAddrV6) -> Self {
+                       SocketAddress::TcpIpV6 { addr: addr.ip().octets(), port: addr.port() }
+               }
+}
+
+#[cfg(feature = "std")]
+impl From<std::net::SocketAddr> for SocketAddress {
+               fn from(addr: std::net::SocketAddr) -> Self {
+                       match addr {
+                               std::net::SocketAddr::V4(addr) => addr.into(),
+                               std::net::SocketAddr::V6(addr) => addr.into(),
+                       }
+               }
+}
+
+fn parse_onion_address(host: &str, port: u16) -> Result<SocketAddress, SocketAddressParseError> {
+       if host.ends_with(".onion") {
+               let domain = &host[..host.len() - ".onion".len()];
+               if domain.len() != 56 {
+                       return Err(SocketAddressParseError::InvalidOnionV3);
+               }
+               let onion = base32::Alphabet::RFC4648 { padding: false }.decode(&domain).map_err(|_| SocketAddressParseError::InvalidOnionV3)?;
+               if onion.len() != 35 {
+                       return Err(SocketAddressParseError::InvalidOnionV3);
+               }
+               let version = onion[0];
+               let first_checksum_flag = onion[1];
+               let second_checksum_flag = onion[2];
+               let mut ed25519_pubkey = [0; 32];
+               ed25519_pubkey.copy_from_slice(&onion[3..35]);
+               let checksum = u16::from_be_bytes([first_checksum_flag, second_checksum_flag]);
+               return Ok(SocketAddress::OnionV3 { ed25519_pubkey, checksum, version, port });
+
+       } else {
+               return Err(SocketAddressParseError::InvalidInput);
+       }
+}
+
+#[cfg(feature = "std")]
+impl FromStr for SocketAddress {
+       type Err = SocketAddressParseError;
+
+       fn from_str(s: &str) -> Result<Self, Self::Err> {
+               match std::net::SocketAddr::from_str(s) {
+                       Ok(addr) => Ok(addr.into()),
+                       Err(_) => {
+                               let trimmed_input = match s.rfind(":") {
+                                       Some(pos) => pos,
+                                       None => return Err(SocketAddressParseError::InvalidInput),
+                               };
+                               let host = &s[..trimmed_input];
+                               let port: u16 = s[trimmed_input + 1..].parse().map_err(|_| SocketAddressParseError::InvalidPort)?;
+                               if host.ends_with(".onion") {
+                                       return parse_onion_address(host, port);
+                               };
+                               if let Ok(hostname) = Hostname::try_from(s[..trimmed_input].to_string()) {
+                                       return Ok(SocketAddress::Hostname { hostname, port });
+                               };
+                               return Err(SocketAddressParseError::SocketAddrParse)
+                       },
+               }
+       }
+}
+
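A hedged usage sketch of the new FromStr impl (std only), again assuming the crate-external path lightning::ln::msgs for SocketAddress and SocketAddressParseError:

    use core::str::FromStr;
    use lightning::ln::msgs::{SocketAddress, SocketAddressParseError};

    fn main() {
        // IPv4/IPv6 strings go through std's SocketAddr parser; everything else
        // falls back to the onion-v3 and hostname branches above.
        let v4 = SocketAddress::from_str("198.51.100.7:9735").unwrap();
        assert!(matches!(v4, SocketAddress::TcpIpV4 { port: 9735, .. }));

        let host = "ldk-node.example.com:9735".parse::<SocketAddress>().unwrap();
        assert!(matches!(host, SocketAddress::Hostname { port: 9735, .. }));

        // A malformed port surfaces as InvalidPort rather than a generic failure.
        assert_eq!("example.com:no-port".parse::<SocketAddress>(),
                   Err(SocketAddressParseError::InvalidPort));
    }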
 /// Represents the set of gossip messages that require a signature from a node's identity key.
 pub enum UnsignedGossipMessage<'a> {
        /// An unsigned channel announcement.
@@ -939,7 +1043,7 @@ pub struct UnsignedNodeAnnouncement {
        /// This should be sanitized before use. There is no guarantee of uniqueness.
        pub alias: NodeAlias,
        /// List of addresses on which this node is reachable
-       pub addresses: Vec<NetAddress>,
+       pub addresses: Vec<SocketAddress>,
        pub(crate) excess_address_data: Vec<u8>,
        pub(crate) excess_data: Vec<u8>,
 }
@@ -1230,7 +1334,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
        /// Handle an incoming `channel_ready` message from the given peer.
        fn handle_channel_ready(&self, their_node_id: &PublicKey, msg: &ChannelReady);
 
-       // Channl close:
+       // Channel close:
        /// Handle an incoming `shutdown` message from the given peer.
        fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown);
        /// Handle an incoming `closing_signed` message from the given peer.
@@ -1417,6 +1521,8 @@ pub trait OnionMessageHandler : OnionMessageProvider {
 }
 
 mod fuzzy_internal_msgs {
+       use bitcoin::secp256k1::PublicKey;
+       use crate::blinded_path::payment::PaymentConstraints;
        use crate::prelude::*;
        use crate::ln::{PaymentPreimage, PaymentSecret};
 
@@ -1445,6 +1551,14 @@ mod fuzzy_internal_msgs {
                        amt_msat: u64,
                        outgoing_cltv_value: u32,
                },
+               BlindedReceive {
+                       amt_msat: u64,
+                       total_msat: u64,
+                       outgoing_cltv_value: u32,
+                       payment_secret: PaymentSecret,
+                       payment_constraints: PaymentConstraints,
+                       intro_node_blinding_point: PublicKey,
+               }
        }
 
        pub(crate) enum OutboundOnionPayload {
@@ -1462,6 +1576,17 @@ mod fuzzy_internal_msgs {
                        amt_msat: u64,
                        outgoing_cltv_value: u32,
                },
+               BlindedForward {
+                       encrypted_tlvs: Vec<u8>,
+                       intro_node_blinding_point: Option<PublicKey>,
+               },
+               BlindedReceive {
+                       amt_msat: u64,
+                       total_msat: u64,
+                       outgoing_cltv_value: u32,
+                       encrypted_tlvs: Vec<u8>,
+                       intro_node_blinding_point: Option<PublicKey>, // Set if the introduction node of the blinded path is the final node
+               }
        }
 
        pub struct DecodedOnionErrorPacket {
@@ -1774,7 +1899,7 @@ impl Readable for Init {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let global_features: InitFeatures = Readable::read(r)?;
                let features: InitFeatures = Readable::read(r)?;
-               let mut remote_network_address: Option<NetAddress> = None;
+               let mut remote_network_address: Option<SocketAddress> = None;
                let mut networks: Option<WithoutLength<Vec<ChainHash>>> = None;
                decode_tlv_stream!(r, {
                        (1, networks, option),
@@ -1997,29 +2122,53 @@ impl Writeable for OutboundOnionPayload {
                                        (16, payment_metadata.as_ref().map(|m| WithoutLength(m)), option)
                                }, custom_tlvs.iter());
                        },
+                       Self::BlindedForward { encrypted_tlvs, intro_node_blinding_point } => {
+                               _encode_varint_length_prefixed_tlv!(w, {
+                                       (10, *encrypted_tlvs, required_vec),
+                                       (12, intro_node_blinding_point, option)
+                               });
+                       },
+                       Self::BlindedReceive {
+                               amt_msat, total_msat, outgoing_cltv_value, encrypted_tlvs,
+                               intro_node_blinding_point,
+                       } => {
+                               _encode_varint_length_prefixed_tlv!(w, {
+                                       (2, HighZeroBytesDroppedBigSize(*amt_msat), required),
+                                       (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+                                       (10, *encrypted_tlvs, required_vec),
+                                       (12, intro_node_blinding_point, option),
+                                       (18, HighZeroBytesDroppedBigSize(*total_msat), required)
+                               });
+                       },
                }
                Ok(())
        }
 }
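For readers unfamiliar with the framing, here is a hedged, hand-rolled illustration of what the macro above emits for the simplest new case: a BlindedForward payload carrying four bytes of encrypted recipient data and no blinding point is a single-byte BigSize stream length followed by one type-10 TLV record (the type numbers match the route-blinding TLVs written above):

    fn main() {
        let encrypted = [0xde, 0xad, 0xbe, 0xef];
        let mut payload: Vec<u8> = Vec::new();
        payload.push(2 + encrypted.len() as u8); // BigSize length of the TLV stream (type + len + value)
        payload.push(10);                        // TLV type 10: encrypted_recipient_data
        payload.push(encrypted.len() as u8);     // TLV length
        payload.extend_from_slice(&encrypted);   // TLV value, written without an inner length prefix
        assert_eq!(payload, vec![0x06u8, 0x0a, 0x04, 0xde, 0xad, 0xbe, 0xef]);
    }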
 
-impl Readable for InboundOnionPayload {
-       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
-               let mut amt = HighZeroBytesDroppedBigSize(0u64);
-               let mut cltv_value = HighZeroBytesDroppedBigSize(0u32);
+impl<NS: Deref> ReadableArgs<&NS> for InboundOnionPayload where NS::Target: NodeSigner {
+       fn read<R: Read>(r: &mut R, node_signer: &NS) -> Result<Self, DecodeError> {
+               let mut amt = None;
+               let mut cltv_value = None;
                let mut short_id: Option<u64> = None;
                let mut payment_data: Option<FinalOnionHopData> = None;
+               let mut encrypted_tlvs_opt: Option<WithoutLength<Vec<u8>>> = None;
+               let mut intro_node_blinding_point = None;
                let mut payment_metadata: Option<WithoutLength<Vec<u8>>> = None;
+               let mut total_msat = None;
                let mut keysend_preimage: Option<PaymentPreimage> = None;
                let mut custom_tlvs = Vec::new();
 
                let tlv_len = BigSize::read(r)?;
                let rd = FixedLengthReader::new(r, tlv_len.0);
                decode_tlv_stream_with_custom_tlv_decode!(rd, {
-                       (2, amt, required),
-                       (4, cltv_value, required),
+                       (2, amt, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
+                       (4, cltv_value, (option, encoding: (u32, HighZeroBytesDroppedBigSize))),
                        (6, short_id, option),
                        (8, payment_data, option),
+                       (10, encrypted_tlvs_opt, option),
+                       (12, intro_node_blinding_point, option),
                        (16, payment_metadata, option),
+                       (18, total_msat, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
                        // See https://github.com/lightning/blips/blob/master/blip-0003.md
                        (5482373484, keysend_preimage, option)
                }, |msg_type: u64, msg_reader: &mut FixedLengthReader<_>| -> Result<bool, DecodeError> {
@@ -2030,16 +2179,44 @@ impl Readable for InboundOnionPayload {
                        Ok(true)
                });
 
-               if amt.0 > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
-               if let Some(short_channel_id) = short_id {
-                       if payment_data.is_some() { return Err(DecodeError::InvalidValue) }
-                       if payment_metadata.is_some() { return Err(DecodeError::InvalidValue); }
+               if amt.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
+
+               if let Some(blinding_point) = intro_node_blinding_point {
+                       if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() {
+                               return Err(DecodeError::InvalidValue)
+                       }
+                       let enc_tlvs = encrypted_tlvs_opt.ok_or(DecodeError::InvalidValue)?.0;
+                       let enc_tlvs_ss = node_signer.ecdh(Recipient::Node, &blinding_point, None)
+                               .map_err(|_| DecodeError::InvalidValue)?;
+                       let rho = onion_utils::gen_rho_from_shared_secret(&enc_tlvs_ss.secret_bytes());
+                       let mut s = Cursor::new(&enc_tlvs);
+                       let mut reader = FixedLengthReader::new(&mut s, enc_tlvs.len() as u64);
+                       match ChaChaPolyReadAdapter::read(&mut reader, rho)? {
+                               ChaChaPolyReadAdapter { readable: ReceiveTlvs { payment_secret, payment_constraints }} => {
+                                       if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
+                                       Ok(Self::BlindedReceive {
+                                               amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+                                               total_msat: total_msat.ok_or(DecodeError::InvalidValue)?,
+                                               outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
+                                               payment_secret,
+                                               payment_constraints,
+                                               intro_node_blinding_point: blinding_point,
+                                       })
+                               },
+                       }
+               } else if let Some(short_channel_id) = short_id {
+                       if payment_data.is_some() || payment_metadata.is_some() || encrypted_tlvs_opt.is_some() ||
+                               total_msat.is_some()
+                       { return Err(DecodeError::InvalidValue) }
                        Ok(Self::Forward {
                                short_channel_id,
-                               amt_to_forward: amt.0,
-                               outgoing_cltv_value: cltv_value.0,
+                               amt_to_forward: amt.ok_or(DecodeError::InvalidValue)?,
+                               outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
                        })
                } else {
+                       if encrypted_tlvs_opt.is_some() || total_msat.is_some() {
+                               return Err(DecodeError::InvalidValue)
+                       }
                        if let Some(data) = &payment_data {
                                if data.total_msat > MAX_VALUE_MSAT {
                                        return Err(DecodeError::InvalidValue);
@@ -2049,22 +2226,14 @@ impl Readable for InboundOnionPayload {
                                payment_data,
                                payment_metadata: payment_metadata.map(|w| w.0),
                                keysend_preimage,
-                               amt_msat: amt.0,
-                               outgoing_cltv_value: cltv_value.0,
+                               amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+                               outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
                                custom_tlvs,
                        })
                }
        }
 }
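For context on the decryption path above: per BOLT 4 route blinding, the key handed to the ChaCha20-Poly1305 adapter is rho = HMAC-SHA256(key = "rho", msg = shared_secret), where the shared secret comes from the node's ECDH against the intro node's blinding point (the node_signer.ecdh(..) call). A hedged sketch of that derivation, assuming the bitcoin_hashes 0.11 API in use here:

    use bitcoin::hashes::{Hash, HashEngine};
    use bitcoin::hashes::hmac::{Hmac, HmacEngine};
    use bitcoin::hashes::sha256::Hash as Sha256;

    // rho = HMAC-SHA256(key = "rho", msg = shared_secret)
    fn rho_from_shared_secret(shared_secret: &[u8; 32]) -> [u8; 32] {
        let mut hmac = HmacEngine::<Sha256>::new(b"rho");
        hmac.input(shared_secret);
        Hmac::<Sha256>::from_engine(hmac).into_inner()
    }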
 
-// ReadableArgs because we need onion_utils::decode_next_hop to accommodate payment packets and
-// onion message packets.
-impl ReadableArgs<()> for InboundOnionPayload {
-       fn read<R: Read>(r: &mut R, _arg: ()) -> Result<Self, DecodeError> {
-               <Self as Readable>::read(r)
-       }
-}
-
 impl Writeable for Ping {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.ponglen.write(w)?;
@@ -2273,7 +2442,7 @@ impl Readable for UnsignedNodeAnnouncement {
                let alias: NodeAlias = Readable::read(r)?;
 
                let addr_len: u16 = Readable::read(r)?;
-               let mut addresses: Vec<NetAddress> = Vec::new();
+               let mut addresses: Vec<SocketAddress> = Vec::new();
                let mut addr_readpos = 0;
                let mut excess = false;
                let mut excess_byte = 0;
@@ -2472,14 +2641,18 @@ impl_writeable_msg!(GossipTimestampFilter, {
 
 #[cfg(test)]
 mod tests {
+       use std::convert::TryFrom;
        use bitcoin::blockdata::constants::ChainHash;
        use bitcoin::{Transaction, PackedLockTime, TxIn, Script, Sequence, Witness, TxOut};
        use hex;
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use crate::ln::ChannelId;
        use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
        use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket};
+       use crate::ln::msgs::SocketAddress;
        use crate::routing::gossip::{NodeAlias, NodeId};
-       use crate::util::ser::{Writeable, Readable, Hostname, TransactionU16LenLimited};
+       use crate::util::ser::{Writeable, Readable, ReadableArgs, Hostname, TransactionU16LenLimited};
+       use crate::util::test_utils;
 
        use bitcoin::hashes::hex::FromHex;
        use bitcoin::util::address::Address;
@@ -2493,11 +2666,13 @@ mod tests {
 
        use crate::io::{self, Cursor};
        use crate::prelude::*;
-       use core::convert::TryFrom;
        use core::str::FromStr;
-
        use crate::chain::transaction::OutPoint;
 
+       #[cfg(feature = "std")]
+       use std::net::{Ipv4Addr, Ipv6Addr};
+       use crate::ln::msgs::SocketAddressParseError;
+
        #[test]
        fn encoding_channel_reestablish() {
                let public_key = {
@@ -2506,7 +2681,7 @@ mod tests {
                };
 
                let cr = msgs::ChannelReestablish {
-                       channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+                       channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]),
                        next_local_commitment_number: 3,
                        next_remote_commitment_number: 4,
                        your_last_per_commitment_secret: [9;32],
@@ -2535,7 +2710,7 @@ mod tests {
                };
 
                let cr = msgs::ChannelReestablish {
-                       channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+                       channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]),
                        next_local_commitment_number: 3,
                        next_remote_commitment_number: 4,
                        your_last_per_commitment_secret: [9;32],
@@ -2587,7 +2762,7 @@ mod tests {
                let sig_1 = get_sig_on!(privkey, secp_ctx, String::from("01010101010101010101010101010101"));
                let sig_2 = get_sig_on!(privkey, secp_ctx, String::from("02020202020202020202020202020202"));
                let announcement_signatures = msgs::AnnouncementSignatures {
-                       channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+                       channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]),
                        short_channel_id: 2316138423780173,
                        node_signature: sig_1,
                        bitcoin_signature: sig_2,
@@ -2663,24 +2838,24 @@ mod tests {
                };
                let mut addresses = Vec::new();
                if ipv4 {
-                       addresses.push(msgs::NetAddress::IPv4 {
+                       addresses.push(SocketAddress::TcpIpV4 {
                                addr: [255, 254, 253, 252],
                                port: 9735
                        });
                }
                if ipv6 {
-                       addresses.push(msgs::NetAddress::IPv6 {
+                       addresses.push(SocketAddress::TcpIpV6 {
                                addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
                                port: 9735
                        });
                }
                if onionv2 {
-                       addresses.push(msgs::NetAddress::OnionV2(
+                       addresses.push(msgs::SocketAddress::OnionV2(
                                [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]
                        ));
                }
                if onionv3 {
-                       addresses.push(msgs::NetAddress::OnionV3 {
+                       addresses.push(msgs::SocketAddress::OnionV3 {
                                ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224],
                                checksum: 32,
                                version: 16,
@@ -2688,7 +2863,7 @@ mod tests {
                        });
                }
                if hostname {
-                       addresses.push(msgs::NetAddress::Hostname {
+                       addresses.push(SocketAddress::Hostname {
                                hostname: Hostname::try_from(String::from("host")).unwrap(),
                                port: 9735,
                        });
@@ -2823,7 +2998,7 @@ mod tests {
                let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
                let open_channel = msgs::OpenChannel {
                        chain_hash: BlockHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap(),
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_satoshis: 1311768467284833366,
                        push_msat: 2536655962884945560,
                        dust_limit_satoshis: 3608586615801332854,
@@ -2884,7 +3059,7 @@ mod tests {
                let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx);
                let open_channelv2 = msgs::OpenChannelV2 {
                        chain_hash: BlockHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap(),
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_feerate_sat_per_1000_weight: 821716,
                        commitment_feerate_sat_per_1000_weight: 821716,
                        funding_satoshis: 1311768467284833366,
@@ -2974,7 +3149,7 @@ mod tests {
                let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx);
                let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
                let accept_channel = msgs::AcceptChannel {
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        dust_limit_satoshis: 1311768467284833366,
                        max_htlc_value_in_flight_msat: 2536655962884945560,
                        channel_reserve_satoshis: 3608586615801332854,
@@ -3017,7 +3192,7 @@ mod tests {
                let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
                let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx);
                let accept_channelv2 = msgs::AcceptChannelV2 {
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_satoshis: 1311768467284833366,
                        dust_limit_satoshis: 1311768467284833366,
                        max_htlc_value_in_flight_msat: 2536655962884945560,
@@ -3071,7 +3246,7 @@ mod tests {
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
                let funding_created = msgs::FundingCreated {
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_txid: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
                        funding_output_index: 255,
                        signature: sig_1,
@@ -3091,7 +3266,7 @@ mod tests {
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
                let funding_signed = msgs::FundingSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        signature: sig_1,
                        #[cfg(taproot)]
                        partial_signature_with_nonce: None,
@@ -3106,7 +3281,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let (_, pubkey_1,) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let channel_ready = msgs::ChannelReady {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        next_per_commitment_point: pubkey_1,
                        short_channel_id_alias: None,
                };
@@ -3118,7 +3293,7 @@ mod tests {
        #[test]
        fn encoding_tx_add_input() {
                let tx_add_input = msgs::TxAddInput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                        prevtx: TransactionU16LenLimited::new(Transaction {
                                version: 2,
@@ -3153,7 +3328,7 @@ mod tests {
        #[test]
        fn encoding_tx_add_output() {
                let tx_add_output = msgs::TxAddOutput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                        sats: 4886718345,
                        script: Address::from_str("bc1qxmk834g5marzm227dgqvynd23y2nvt2ztwcw2z").unwrap().script_pubkey(),
@@ -3166,7 +3341,7 @@ mod tests {
        #[test]
        fn encoding_tx_remove_input() {
                let tx_remove_input = msgs::TxRemoveInput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                };
                let encoded_value = tx_remove_input.encode();
@@ -3177,7 +3352,7 @@ mod tests {
        #[test]
        fn encoding_tx_remove_output() {
                let tx_remove_output = msgs::TxRemoveOutput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                };
                let encoded_value = tx_remove_output.encode();
@@ -3188,7 +3363,7 @@ mod tests {
        #[test]
        fn encoding_tx_complete() {
                let tx_complete = msgs::TxComplete {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                };
                let encoded_value = tx_complete.encode();
                let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap();
@@ -3198,7 +3373,7 @@ mod tests {
        #[test]
        fn encoding_tx_signatures() {
                let tx_signatures = msgs::TxSignatures {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        tx_hash: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
                        witnesses: vec![
                                Witness::from_vec(vec![
@@ -3232,7 +3407,7 @@ mod tests {
 
        fn do_encoding_tx_init_rbf(funding_value_with_hex_target: Option<(i64, &str)>) {
                let tx_init_rbf = msgs::TxInitRbf {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        locktime: 305419896,
                        feerate_sat_per_1000_weight: 20190119,
                        funding_output_contribution: if let Some((value, _)) = funding_value_with_hex_target { Some(value) } else { None },
@@ -3258,7 +3433,7 @@ mod tests {
 
        fn do_encoding_tx_ack_rbf(funding_value_with_hex_target: Option<(i64, &str)>) {
                let tx_ack_rbf = msgs::TxAckRbf {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        funding_output_contribution: if let Some((value, _)) = funding_value_with_hex_target { Some(value) } else { None },
                };
                let encoded_value = tx_ack_rbf.encode();
@@ -3281,7 +3456,7 @@ mod tests {
        #[test]
        fn encoding_tx_abort() {
                let tx_abort = msgs::TxAbort {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        data: hex::decode("54686520717569636B2062726F776E20666F78206A756D7073206F76657220746865206C617A7920646F672E").unwrap(),
                };
                let encoded_value = tx_abort.encode();
@@ -3294,12 +3469,12 @@ mod tests {
                let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let script = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
                let shutdown = msgs::Shutdown {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        scriptpubkey:
-                                    if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() }
+                               if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() }
                                else if script_type == 2 { Address::p2sh(&script, Network::Testnet).unwrap().script_pubkey() }
                                else if script_type == 3 { Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).unwrap().script_pubkey() }
-                               else                     { Address::p2wsh(&script, Network::Testnet).script_pubkey() },
+                               else { Address::p2wsh(&script, Network::Testnet).script_pubkey() },
                };
                let encoded_value = shutdown.encode();
                let mut target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap();
@@ -3329,7 +3504,7 @@ mod tests {
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
                let closing_signed = msgs::ClosingSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        fee_satoshis: 2316138423780173,
                        signature: sig_1,
                        fee_range: None,
@@ -3340,7 +3515,7 @@ mod tests {
                assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value)).unwrap(), closing_signed);
 
                let closing_signed_with_range = msgs::ClosingSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        fee_satoshis: 2316138423780173,
                        signature: sig_1,
                        fee_range: Some(msgs::ClosingSignedFeeRange {
@@ -3366,7 +3541,7 @@ mod tests {
                        hmac: [2; 32]
                };
                let update_add_htlc = msgs::UpdateAddHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        amount_msat: 3608586615801332854,
                        payment_hash: PaymentHash([1; 32]),
@@ -3382,7 +3557,7 @@ mod tests {
        #[test]
        fn encoding_update_fulfill_htlc() {
                let update_fulfill_htlc = msgs::UpdateFulfillHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        payment_preimage: PaymentPreimage([1; 32]),
                };
@@ -3397,7 +3572,7 @@ mod tests {
                        data: [1; 32].to_vec(),
                };
                let update_fail_htlc = msgs::UpdateFailHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        reason
                };
@@ -3409,7 +3584,7 @@ mod tests {
        #[test]
        fn encoding_update_fail_malformed_htlc() {
                let update_fail_malformed_htlc = msgs::UpdateFailMalformedHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        sha256_of_onion: [1; 32],
                        failure_code: 255
@@ -3430,7 +3605,7 @@ mod tests {
                let sig_3 = get_sig_on!(privkey_3, secp_ctx, String::from("01010101010101010101010101010101"));
                let sig_4 = get_sig_on!(privkey_4, secp_ctx, String::from("01010101010101010101010101010101"));
                let commitment_signed = msgs::CommitmentSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        signature: sig_1,
                        htlc_signatures: if htlcs { vec![sig_2, sig_3, sig_4] } else { Vec::new() },
                        #[cfg(taproot)]
@@ -3457,7 +3632,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let raa = msgs::RevokeAndACK {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        per_commitment_secret: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        next_per_commitment_point: pubkey_1,
                        #[cfg(taproot)]
@@ -3471,7 +3646,7 @@ mod tests {
        #[test]
        fn encoding_update_fee() {
                let update_fee = msgs::UpdateFee {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        feerate_per_kw: 20190119,
                };
                let encoded_value = update_fee.encode();
@@ -3504,7 +3679,7 @@ mod tests {
                }.encode(), hex::decode("00000000014001010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap());
                let init_msg = msgs::Init { features: InitFeatures::from_le_bytes(vec![]),
                        networks: Some(vec![mainnet_hash]),
-                       remote_network_address: Some(msgs::NetAddress::IPv4 {
+                       remote_network_address: Some(SocketAddress::TcpIpV4 {
                                addr: [127, 0, 0, 1],
                                port: 1000,
                        }),
@@ -3518,7 +3693,7 @@ mod tests {
        #[test]
        fn encoding_error() {
                let error = msgs::ErrorMessage {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        data: String::from("rust-lightning"),
                };
                let encoded_value = error.encode();
@@ -3529,7 +3704,7 @@ mod tests {
        #[test]
        fn encoding_warning() {
                let error = msgs::WarningMessage {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        data: String::from("rust-lightning"),
                };
                let encoded_value = error.encode();
@@ -3569,8 +3744,11 @@ mod tests {
                let target_value = hex::decode("1a02080badf00d010203040404ffffffff0608deadbeef1bad1dea").unwrap();
                assert_eq!(encoded_value, target_value);
 
-               let inbound_msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
-               if let msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } = inbound_msg {
+               let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+               if let msgs::InboundOnionPayload::Forward {
+                       short_channel_id, amt_to_forward, outgoing_cltv_value
+               } = inbound_msg {
                        assert_eq!(short_channel_id, 0xdeadbeef1bad1dea);
                        assert_eq!(amt_to_forward, 0x0badf00d01020304);
                        assert_eq!(outgoing_cltv_value, 0xffffffff);
@@ -3591,8 +3769,11 @@ mod tests {
                let target_value = hex::decode("1002080badf00d010203040404ffffffff").unwrap();
                assert_eq!(encoded_value, target_value);
 
-               let inbound_msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
-               if let msgs::InboundOnionPayload::Receive { payment_data: None, amt_msat, outgoing_cltv_value, .. } = inbound_msg {
+               let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
+               if let msgs::InboundOnionPayload::Receive {
+                       payment_data: None, amt_msat, outgoing_cltv_value, ..
+               } = inbound_msg {
                        assert_eq!(amt_msat, 0x0badf00d01020304);
                        assert_eq!(outgoing_cltv_value, 0xffffffff);
                } else { panic!(); }
@@ -3616,7 +3797,8 @@ mod tests {
                let target_value = hex::decode("3602080badf00d010203040404ffffffff082442424242424242424242424242424242424242424242424242424242424242421badca1f").unwrap();
                assert_eq!(encoded_value, target_value);
 
-               let inbound_msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+               let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
                if let msgs::InboundOnionPayload::Receive {
                        payment_data: Some(FinalOnionHopData {
                                payment_secret,
@@ -3651,7 +3833,8 @@ mod tests {
                        outgoing_cltv_value: 0xffffffff,
                };
                let encoded_value = msg.encode();
-               assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..])).is_err());
+               let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+               assert!(msgs::InboundOnionPayload::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).is_err());
                let good_type_range_tlvs = vec![
                        ((1 << 16) - 3, vec![42]),
                        ((1 << 16) - 1, vec![42; 32]),
@@ -3660,7 +3843,7 @@ mod tests {
                        *custom_tlvs = good_type_range_tlvs.clone();
                }
                let encoded_value = msg.encode();
-               let inbound_msg = Readable::read(&mut Cursor::new(&encoded_value[..])).unwrap();
+               let inbound_msg = ReadableArgs::read(&mut Cursor::new(&encoded_value[..]), &&node_signer).unwrap();
                match inbound_msg {
                        msgs::InboundOnionPayload::Receive { custom_tlvs, .. } => assert!(custom_tlvs.is_empty()),
                        _ => panic!(),
@@ -3684,7 +3867,8 @@ mod tests {
                let encoded_value = msg.encode();
                let target_value = hex::decode("2e02080badf00d010203040404ffffffffff0000000146c6616b021234ff0000000146c6616f084242424242424242").unwrap();
                assert_eq!(encoded_value, target_value);
-               let inbound_msg: msgs::InboundOnionPayload = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+               let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+               let inbound_msg: msgs::InboundOnionPayload = ReadableArgs::read(&mut Cursor::new(&target_value[..]), &&node_signer).unwrap();
                if let msgs::InboundOnionPayload::Receive {
                        payment_data: None,
                        payment_metadata: None,
@@ -3847,7 +4031,10 @@ mod tests {
                // payload length to be encoded over multiple bytes rather than a single u8.
                let big_payload = encode_big_payload().unwrap();
                let mut rd = Cursor::new(&big_payload[..]);
-               <msgs::InboundOnionPayload as Readable>::read(&mut rd).unwrap();
+
+               let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
+               <msgs::InboundOnionPayload as ReadableArgs<&&test_utils::TestKeysInterface>>
+                       ::read(&mut rd, &&node_signer).unwrap();
        }
        // see above test, needs to be a separate method for use of the serialization macros.
        fn encode_big_payload() -> Result<Vec<u8>, io::Error> {
@@ -3869,4 +4056,47 @@ mod tests {
                }
                Ok(encoded_payload)
        }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_socket_address_from_str() {
+               assert_eq!(SocketAddress::TcpIpV4 {
+                       addr: Ipv4Addr::new(127, 0, 0, 1).octets(),
+                       port: 1234,
+               }, SocketAddress::from_str("127.0.0.1:1234").unwrap());
+
+               assert_eq!(SocketAddress::TcpIpV6 {
+                       addr: Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).octets(),
+                       port: 1234,
+               }, SocketAddress::from_str("[0:0:0:0:0:0:0:1]:1234").unwrap());
+               assert_eq!(
+                       SocketAddress::Hostname {
+                               hostname: Hostname::try_from("lightning-node.mydomain.com".to_string()).unwrap(),
+                               port: 1234,
+                       }, SocketAddress::from_str("lightning-node.mydomain.com:1234").unwrap());
+               assert_eq!(
+                       SocketAddress::Hostname {
+                               hostname: Hostname::try_from("example.com".to_string()).unwrap(),
+                               port: 1234,
+                       }, SocketAddress::from_str("example.com:1234").unwrap());
+               assert_eq!(SocketAddress::OnionV3 {
+                       ed25519_pubkey: [37, 24, 75, 5, 25, 73, 117, 194, 139, 102, 182, 107, 4, 105, 247, 246, 85,
+                       111, 177, 172, 49, 137, 167, 155, 64, 221, 163, 47, 31, 33, 71, 3],
+                       checksum: 48326,
+                       version: 121,
+                       port: 1234
+               }, SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:1234").unwrap());
+               assert_eq!(Err(SocketAddressParseError::InvalidOnionV3), SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6.onion:1234"));
+               assert_eq!(Err(SocketAddressParseError::InvalidInput), SocketAddress::from_str("127.0.0.1@1234"));
+               assert_eq!(Err(SocketAddressParseError::InvalidInput), "".parse::<SocketAddress>());
+               assert!(SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:9735:94").is_err());
+               assert!(SocketAddress::from_str("wrong$%#.com:1234").is_err());
+               assert_eq!(Err(SocketAddressParseError::InvalidPort), SocketAddress::from_str("example.com:wrong"));
+               assert!("localhost".parse::<SocketAddress>().is_err());
+               assert!("localhost:invalid-port".parse::<SocketAddress>().is_err());
+               assert!("invalid-onion-v3-hostname.onion:8080".parse::<SocketAddress>().is_err());
+               assert!("b32.example.onion:invalid-port".parse::<SocketAddress>().is_err());
+               assert!("invalid-address".parse::<SocketAddress>().is_err());
+               assert!(SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:1234").is_err());
+       }
 }
index d326c1aab74441f1203f532ea48a333b85f8c375..3731c31c5b42c372663f29e31d30b6f6207ea811 100644 (file)
@@ -19,7 +19,7 @@ use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields};
 use crate::ln::onion_utils;
 use crate::routing::gossip::{NetworkUpdate, RoutingFees};
-use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop};
+use crate::routing::router::{get_route, PaymentParameters, Route, RouteParameters, RouteHint, RouteHintHop};
 use crate::ln::features::{InitFeatures, Bolt11InvoiceFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
@@ -516,7 +516,7 @@ fn test_onion_failure() {
        let short_channel_id = channels[1].0.contents.short_channel_id;
        let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
                .unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
-               .context.get_counterparty_htlc_minimum_msat() - 1;
+               .context().get_counterparty_htlc_minimum_msat() - 1;
        let mut bogus_route = route.clone();
        let route_len = bogus_route.paths[0].hops.len();
        bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;
@@ -1048,10 +1048,11 @@ macro_rules! get_phantom_route {
                ])]).unwrap();
                let scorer = test_utils::TestScorer::new();
                let network_graph = $nodes[0].network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, $amt);
                (get_route(
-                       &$nodes[0].node.get_our_node_id(), &payment_params, &network_graph,
+                       &$nodes[0].node.get_our_node_id(), &route_params, &network_graph,
                        Some(&$nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-                       $amt, $nodes[0].logger, &scorer, &(), &[0u8; 32]
+                       $nodes[0].logger, &scorer, &(), &[0u8; 32]
                ).unwrap(), phantom_route_hint.phantom_scid)
        }
 }}
index d55077770f3baae35e2dab9c2f95fd2136071a50..666221b2dd47fbe400b506d7602435905f9a2010 100644 (file)
@@ -12,7 +12,8 @@ use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::wire::Encode;
 use crate::routing::gossip::NetworkUpdate;
-use crate::routing::router::{Path, RouteHop};
+use crate::routing::router::{BlindedTail, Path, RouteHop};
+use crate::sign::NodeSigner;
 use crate::util::chacha20::{ChaCha20, ChaChaReader};
 use crate::util::errors::{self, APIError};
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, LengthCalculatingWriter};
@@ -169,7 +170,9 @@ pub(super) fn build_onion_payloads(path: &Path, total_msat: u64, mut recipient_o
        let mut cur_value_msat = 0u64;
        let mut cur_cltv = starting_htlc_offset;
        let mut last_short_channel_id = 0;
-       let mut res: Vec<msgs::OutboundOnionPayload> = Vec::with_capacity(path.hops.len());
+       let mut res: Vec<msgs::OutboundOnionPayload> = Vec::with_capacity(
+               path.hops.len() + path.blinded_tail.as_ref().map_or(0, |t| t.hops.len())
+       );
 
        for (idx, hop) in path.hops.iter().rev().enumerate() {
                // First hop gets special values so that it can check, on receipt, that everything is
@@ -177,27 +180,51 @@ pub(super) fn build_onion_payloads(path: &Path, total_msat: u64, mut recipient_o
                // the intended recipient).
                let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
                let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
-               res.insert(0, if idx == 0 {
-                       msgs::OutboundOnionPayload::Receive {
-                               payment_data: if let Some(secret) = recipient_onion.payment_secret.take() {
-                                       Some(msgs::FinalOnionHopData {
-                                               payment_secret: secret,
-                                               total_msat,
-                                       })
-                               } else { None },
-                               payment_metadata: recipient_onion.payment_metadata.take(),
-                               keysend_preimage: *keysend_preimage,
-                               custom_tlvs: recipient_onion.custom_tlvs.clone(),
-                               amt_msat: value_msat,
-                               outgoing_cltv_value: cltv,
+               if idx == 0 {
+                       if let Some(BlindedTail {
+                               blinding_point, hops, final_value_msat, excess_final_cltv_expiry_delta, ..
+                       }) = &path.blinded_tail {
+                               let mut blinding_point = Some(*blinding_point);
+                               for (i, blinded_hop) in hops.iter().enumerate() {
+                                       if i == hops.len() - 1 {
+                                               cur_value_msat += final_value_msat;
+                                               cur_cltv += excess_final_cltv_expiry_delta;
+                                               res.push(msgs::OutboundOnionPayload::BlindedReceive {
+                                                       amt_msat: *final_value_msat,
+                                                       total_msat,
+                                                       outgoing_cltv_value: cltv,
+                                                       encrypted_tlvs: blinded_hop.encrypted_payload.clone(),
+                                                       intro_node_blinding_point: blinding_point.take(),
+                                               });
+                                       } else {
+                                               res.push(msgs::OutboundOnionPayload::BlindedForward {
+                                                       encrypted_tlvs: blinded_hop.encrypted_payload.clone(),
+                                                       intro_node_blinding_point: blinding_point.take(),
+                                               });
+                                       }
+                               }
+                       } else {
+                               res.push(msgs::OutboundOnionPayload::Receive {
+                                       payment_data: if let Some(secret) = recipient_onion.payment_secret.take() {
+                                               Some(msgs::FinalOnionHopData {
+                                                       payment_secret: secret,
+                                                       total_msat,
+                                               })
+                                       } else { None },
+                                       payment_metadata: recipient_onion.payment_metadata.take(),
+                                       keysend_preimage: *keysend_preimage,
+                                       custom_tlvs: recipient_onion.custom_tlvs.clone(),
+                                       amt_msat: value_msat,
+                                       outgoing_cltv_value: cltv,
+                               });
                        }
                } else {
-                       msgs::OutboundOnionPayload::Forward {
+                       res.insert(0, msgs::OutboundOnionPayload::Forward {
                                short_channel_id: last_short_channel_id,
                                amt_to_forward: value_msat,
                                outgoing_cltv_value: cltv,
-                       }
-               });
+                       });
+               }
                cur_value_msat += hop.fee_msat;
                if cur_value_msat >= 21000000 * 100000000 * 1000 {
                        return Err(APIError::InvalidRoute{err: "Channel fees overflowed?".to_owned()});
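One detail worth calling out in the blinded-tail branch above: blinding_point.take() means only the first blinded hop (the introduction node) carries the blinding point inline in its payload; every later blinded hop is written with None. A hedged mini-sketch of that pattern:

    fn main() {
        let mut blinding_point = Some("intro-node-blinding-point");
        for hop in ["blinded hop 0 (intro)", "blinded hop 1 (recipient)"] {
            // Only the first iteration observes Some(..); later hops see None.
            println!("{hop}: inline blinding point = {:?}", blinding_point.take());
        }
    }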
@@ -859,8 +886,11 @@ pub(crate) enum OnionDecodeErr {
        },
 }
 
-pub(crate) fn decode_next_payment_hop(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash) -> Result<Hop, OnionDecodeErr> {
-       match decode_next_hop(shared_secret, hop_data, hmac_bytes, Some(payment_hash), ()) {
+pub(crate) fn decode_next_payment_hop<NS: Deref>(
+       shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash,
+       node_signer: &NS,
+) -> Result<Hop, OnionDecodeErr> where NS::Target: NodeSigner {
+       match decode_next_hop(shared_secret, hop_data, hmac_bytes, Some(payment_hash), node_signer) {
                Ok((next_hop_data, None)) => Ok(Hop::Receive(next_hop_data)),
                Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => {
                        Ok(Hop::Forward {
@@ -1007,7 +1037,7 @@ mod tests {
                                                short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
                                        },
                        ], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
 
                let onion_keys = super::construct_onion_keys(&secp_ctx, &route.paths[0], &get_test_session_key()).unwrap();
index 4d6f1f00b568017edd3d0933dd5923dc48f50b79..5ea772e5d4ffbfb0b82c9fc19e014781e3219aa0 100644 (file)
@@ -16,8 +16,9 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey};
 use crate::sign::{EntropySource, NodeSigner, Recipient};
 use crate::events::{self, PaymentFailureReason};
 use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, IDEMPOTENCY_TIMEOUT_TICKS, PaymentId};
+use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, PaymentId};
 use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason};
+use crate::offers::invoice::Bolt12Invoice;
 use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
 use crate::util::errors::APIError;
 use crate::util::logger::Logger;
@@ -32,12 +33,32 @@ use core::ops::Deref;
 use crate::prelude::*;
 use crate::sync::Mutex;
 
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time out the idempotency
+/// of payments by [`PaymentId`]. See [`OutboundPayments::remove_stale_payments`].
+///
+/// [`ChannelManager::timer_tick_occurred`]: crate::ln::channelmanager::ChannelManager::timer_tick_occurred
+pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until an invoice request without
+/// a response is timed out.
+///
+/// [`ChannelManager::timer_tick_occurred`]: crate::ln::channelmanager::ChannelManager::timer_tick_occurred
+const INVOICE_REQUEST_TIMEOUT_TICKS: u8 = 3;
+
 /// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
 /// and later, also stores information for retrying the payment.
 pub(crate) enum PendingOutboundPayment {
        Legacy {
                session_privs: HashSet<[u8; 32]>,
        },
+       AwaitingInvoice {
+               timer_ticks_without_response: u8,
+               retry_strategy: Retry,
+       },
+       InvoiceReceived {
+               payment_hash: PaymentHash,
+               retry_strategy: Retry,
+       },
        Retryable {
                retry_strategy: Option<Retry>,
                attempts: PaymentAttempts,
@@ -108,6 +129,12 @@ impl PendingOutboundPayment {
                        params.previously_failed_channels.push(scid);
                }
        }
+       fn is_awaiting_invoice(&self) -> bool {
+               match self {
+                       PendingOutboundPayment::AwaitingInvoice { .. } => true,
+                       _ => false,
+               }
+       }
        pub(super) fn is_fulfilled(&self) -> bool {
                match self {
                        PendingOutboundPayment::Fulfilled { .. } => true,
@@ -130,6 +157,8 @@ impl PendingOutboundPayment {
        fn payment_hash(&self) -> Option<PaymentHash> {
                match self {
                        PendingOutboundPayment::Legacy { .. } => None,
+                       PendingOutboundPayment::AwaitingInvoice { .. } => None,
+                       PendingOutboundPayment::InvoiceReceived { payment_hash, .. } => Some(*payment_hash),
                        PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
                        PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
                        PendingOutboundPayment::Abandoned { payment_hash, .. } => Some(*payment_hash),
@@ -142,8 +171,9 @@ impl PendingOutboundPayment {
                        PendingOutboundPayment::Legacy { session_privs } |
                                PendingOutboundPayment::Retryable { session_privs, .. } |
                                PendingOutboundPayment::Fulfilled { session_privs, .. } |
-                               PendingOutboundPayment::Abandoned { session_privs, .. }
-                       => session_privs,
+                               PendingOutboundPayment::Abandoned { session_privs, .. } => session_privs,
+                       PendingOutboundPayment::AwaitingInvoice { .. } |
+                               PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); return; },
                });
                let payment_hash = self.payment_hash();
                *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 };
@@ -158,6 +188,12 @@ impl PendingOutboundPayment {
                                payment_hash: *payment_hash,
                                reason: Some(reason)
                        };
+               } else if let PendingOutboundPayment::InvoiceReceived { payment_hash, .. } = self {
+                       *self = PendingOutboundPayment::Abandoned {
+                               session_privs: HashSet::new(),
+                               payment_hash: *payment_hash,
+                               reason: Some(reason)
+                       };
                }
        }
 
@@ -169,7 +205,9 @@ impl PendingOutboundPayment {
                                PendingOutboundPayment::Fulfilled { session_privs, .. } |
                                PendingOutboundPayment::Abandoned { session_privs, .. } => {
                                        session_privs.remove(session_priv)
-                               }
+                               },
+                       PendingOutboundPayment::AwaitingInvoice { .. } |
+                               PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
                };
                if remove_res {
                        if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
@@ -188,7 +226,9 @@ impl PendingOutboundPayment {
                        PendingOutboundPayment::Legacy { session_privs } |
                                PendingOutboundPayment::Retryable { session_privs, .. } => {
                                        session_privs.insert(session_priv)
-                               }
+                               },
+                       PendingOutboundPayment::AwaitingInvoice { .. } |
+                               PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
                        PendingOutboundPayment::Fulfilled { .. } => false,
                        PendingOutboundPayment::Abandoned { .. } => false,
                };
@@ -210,7 +250,9 @@ impl PendingOutboundPayment {
                                PendingOutboundPayment::Fulfilled { session_privs, .. } |
                                PendingOutboundPayment::Abandoned { session_privs, .. } => {
                                        session_privs.len()
-                               }
+                               },
+                       PendingOutboundPayment::AwaitingInvoice { .. } => 0,
+                       PendingOutboundPayment::InvoiceReceived { .. } => 0,
                }
        }
 }
@@ -223,7 +265,7 @@ pub enum Retry {
        /// Each attempt may be multiple HTLCs along multiple paths if the router decides to split up a
        /// retry, and may retry multiple failed HTLCs at once if they failed around the same time and
        /// were retried along a route from a single call to [`Router::find_route_with_id`].
-       Attempts(usize),
+       Attempts(u32),
        #[cfg(not(feature = "no-std"))]
        /// Time elapsed before abandoning retries for a payment. At least one attempt at payment is made;
        /// see [`PaymentParameters::expiry_time`] to avoid any attempt at payment after a specific time.
@@ -232,6 +274,19 @@ pub enum Retry {
        Timeout(core::time::Duration),
 }
 
+#[cfg(feature = "no-std")]
+impl_writeable_tlv_based_enum!(Retry,
+       ;
+       (0, Attempts)
+);
+
+#[cfg(not(feature = "no-std"))]
+impl_writeable_tlv_based_enum!(Retry,
+       ;
+       (0, Attempts),
+       (2, Timeout)
+);
+
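// Usage sketch, not part of the patch, mirroring the `send_payment` calls in the test
// changes further down: the caller chooses one `Retry` per payment, and with the change
// above `Attempts` now carries a `u32`. `channel_manager`, `payment_hash`, `payment_secret`
// and `route_params` are assumed to exist in the caller's scope.
//
//     channel_manager.send_payment(
//         payment_hash,
//         RecipientOnionFields::secret_only(payment_secret),
//         PaymentId(payment_hash.0),
//         route_params,
//         Retry::Attempts(3),
//     )?;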
 impl Retry {
        pub(crate) fn is_retryable_now(&self, attempts: &PaymentAttempts) -> bool {
                match (self, attempts) {
@@ -265,7 +320,7 @@ pub(crate) type PaymentAttempts = PaymentAttemptsUsingTime<ConfiguredTime>;
 pub(crate) struct PaymentAttemptsUsingTime<T: Time> {
        /// This count will be incremented only after the result of the attempt is known. When it's 0,
        /// it means the result of the first attempt is not known yet.
-       pub(crate) count: usize,
+       pub(crate) count: u32,
        /// This field is only used when retry is `Retry::Timeout`, which is only built when the "no-std" feature is disabled
        #[cfg(not(feature = "no-std"))]
        first_attempted_at: T,
@@ -401,6 +456,15 @@ pub enum PaymentSendFailure {
        },
 }
 
+/// An error when attempting to pay a BOLT 12 invoice.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(super) enum Bolt12PaymentError {
+       /// The invoice was not requested.
+       UnexpectedInvoice,
+       /// Payment for an invoice with the corresponding [`PaymentId`] was already initiated.
+       DuplicateInvoice,
+}
+
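// Illustrative sketch, not part of the patch: how a caller might interpret these
// crate-internal errors from `send_payment_for_bolt12_invoice`. The helper name is
// hypothetical and exists only for exposition.
#[allow(unused)]
fn describe_bolt12_payment_error(err: &Bolt12PaymentError) -> &'static str {
        match err {
                // No `AwaitingInvoice` entry exists for this `PaymentId`, so the invoice was unsolicited.
                Bolt12PaymentError::UnexpectedInvoice => "invoice was not requested",
                // The payment already progressed past `AwaitingInvoice`, e.g. a second invoice arrived.
                Bolt12PaymentError::DuplicateInvoice => "payment for this invoice already initiated",
        }
}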
 /// Information which is provided, encrypted, to the payment recipient when sending HTLCs.
 ///
 /// This should generally be constructed with data communicated to us from the recipient (via a
@@ -638,6 +702,50 @@ impl OutboundPayments {
                }
        }
 
+       #[allow(unused)]
+       pub(super) fn send_payment_for_bolt12_invoice<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
+               &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R,
+               first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
+               best_block_height: u32, logger: &L,
+               pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+               send_payment_along_path: SP,
+       ) -> Result<(), Bolt12PaymentError>
+       where
+               R::Target: Router,
+               ES::Target: EntropySource,
+               NS::Target: NodeSigner,
+               L::Target: Logger,
+               IH: Fn() -> InFlightHtlcs,
+               SP: Fn(SendAlongPathArgs) -> Result<(), APIError>,
+       {
+               let payment_hash = invoice.payment_hash();
+               match self.pending_outbound_payments.lock().unwrap().entry(payment_id) {
+                       hash_map::Entry::Occupied(entry) => match entry.get() {
+                               PendingOutboundPayment::AwaitingInvoice { retry_strategy, .. } => {
+                                       *entry.into_mut() = PendingOutboundPayment::InvoiceReceived {
+                                               payment_hash,
+                                               retry_strategy: *retry_strategy,
+                                       };
+                               },
+                               _ => return Err(Bolt12PaymentError::DuplicateInvoice),
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice),
+               };
+
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                       final_value_msat: invoice.amount_msats(),
+               };
+
+               self.find_route_and_send_payment(
+                       payment_hash, payment_id, route_params, router, first_hops, &inflight_htlcs,
+                       entropy_source, node_signer, best_block_height, logger, pending_events,
+                       &send_payment_along_path
+               );
+
+               Ok(())
+       }
+
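        // Call-order sketch, not part of the patch, mirroring `sends_payment_for_bolt12_invoice`
        // in the tests below: a payment must first be registered as awaiting an invoice, and a
        // given invoice may only move it forward once. `outbound_payments`, `invoice` and the
        // remaining arguments are assumed to exist in the caller's scope.
        //
        //     outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0))?; // AwaitingInvoice
        //     outbound_payments.send_payment_for_bolt12_invoice(
        //         &invoice, payment_id, &router, first_hops, inflight_htlcs, entropy_source,
        //         node_signer, best_block_height, logger, pending_events, send_payment_along_path,
        //     )?; // InvoiceReceived, then Retryable once a route is found and sent
        //     // A second call with the same `payment_id` returns `Bolt12PaymentError::DuplicateInvoice`.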
        pub(super) fn check_retry_payments<R: Deref, ES: Deref, NS: Deref, SP, IH, FH, L: Deref>(
                &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
                best_block_height: u32,
@@ -672,14 +780,14 @@ impl OutboundPayments {
                        }
                        core::mem::drop(outbounds);
                        if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params {
-                               self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
+                               self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
                        } else { break }
                }
 
                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                outbounds.retain(|pmt_id, pmt| {
                        let mut retain = true;
-                       if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 {
+                       if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_awaiting_invoice() {
                                pmt.mark_abandoned(PaymentFailureReason::RetriesExhausted);
                                if let PendingOutboundPayment::Abandoned { payment_hash, reason, .. } = pmt {
                                        pending_events.lock().unwrap().push_back((events::Event::PaymentFailed {
@@ -697,7 +805,8 @@ impl OutboundPayments {
        pub(super) fn needs_abandon(&self) -> bool {
                let outbounds = self.pending_outbound_payments.lock().unwrap();
                outbounds.iter().any(|(_, pmt)|
-                       !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled())
+                       !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled() &&
+                       !pmt.is_awaiting_invoice())
        }
 
        /// Errors immediately on [`RetryableSendFailure`] error conditions. Otherwise, further errors may
@@ -757,7 +866,7 @@ impl OutboundPayments {
                Ok(())
        }
 
-       fn retry_payment_internal<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
+       fn find_route_and_send_payment<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
                &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters,
                router: &R, first_hops: Vec<ChannelDetails>, inflight_htlcs: &IH, entropy_source: &ES,
                node_signer: &NS, best_block_height: u32, logger: &L,
@@ -799,12 +908,6 @@ impl OutboundPayments {
                        }
                }
 
-               const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
-               let mut onion_session_privs = Vec::with_capacity(route.paths.len());
-               for _ in 0..route.paths.len() {
-                       onion_session_privs.push(entropy_source.get_secure_random_bytes());
-               }
-
                macro_rules! abandon_with_entry {
                        ($payment: expr, $reason: expr) => {
                                $payment.get_mut().mark_abandoned($reason);
@@ -820,31 +923,74 @@ impl OutboundPayments {
                                }
                        }
                }
-               let (total_msat, recipient_onion, keysend_preimage) = {
+               let (total_msat, recipient_onion, keysend_preimage, onion_session_privs) = {
                        let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                        match outbounds.entry(payment_id) {
                                hash_map::Entry::Occupied(mut payment) => {
-                                       let res = match payment.get() {
+                                       match payment.get() {
                                                PendingOutboundPayment::Retryable {
                                                        total_msat, keysend_preimage, payment_secret, payment_metadata,
                                                        custom_tlvs, pending_amt_msat, ..
                                                } => {
+                                                       const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
                                                        let retry_amt_msat = route.get_total_amount();
                                                        if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
                                                                log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat);
                                                                abandon_with_entry!(payment, PaymentFailureReason::UnexpectedError);
                                                                return
                                                        }
-                                                       (*total_msat, RecipientOnionFields {
-                                                                       payment_secret: *payment_secret,
-                                                                       payment_metadata: payment_metadata.clone(),
-                                                                       custom_tlvs: custom_tlvs.clone(),
-                                                               }, *keysend_preimage)
+
+                                                       if !payment.get().is_retryable_now() {
+                                                               log_error!(logger, "Retries exhausted for payment id {}", &payment_id);
+                                                               abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted);
+                                                               return
+                                                       }
+
+                                                       let total_msat = *total_msat;
+                                                       let recipient_onion = RecipientOnionFields {
+                                                               payment_secret: *payment_secret,
+                                                               payment_metadata: payment_metadata.clone(),
+                                                               custom_tlvs: custom_tlvs.clone(),
+                                                       };
+                                                       let keysend_preimage = *keysend_preimage;
+
+                                                       let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+                                                       for _ in 0..route.paths.len() {
+                                                               onion_session_privs.push(entropy_source.get_secure_random_bytes());
+                                                       }
+
+                                                       for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+                                                               assert!(payment.get_mut().insert(*session_priv_bytes, path));
+                                                       }
+
+                                                       payment.get_mut().increment_attempts();
+
+                                                       (total_msat, recipient_onion, keysend_preimage, onion_session_privs)
                                                },
                                                PendingOutboundPayment::Legacy { .. } => {
                                                        log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102");
                                                        return
                                                },
+                                               PendingOutboundPayment::AwaitingInvoice { .. } => {
+                                                       log_error!(logger, "Payment not yet sent");
+                                                       return
+                                               },
+                                               PendingOutboundPayment::InvoiceReceived { payment_hash, retry_strategy } => {
+                                                       let total_amount = route_params.final_value_msat;
+                                                       let recipient_onion = RecipientOnionFields {
+                                                               payment_secret: None,
+                                                               payment_metadata: None,
+                                                               custom_tlvs: vec![],
+                                                       };
+                                                       let retry_strategy = Some(*retry_strategy);
+                                                       let payment_params = Some(route_params.payment_params.clone());
+                                                       let (retryable_payment, onion_session_privs) = self.create_pending_payment(
+                                                               *payment_hash, recipient_onion.clone(), None, &route,
+                                                               retry_strategy, payment_params, entropy_source, best_block_height
+                                                       );
+                                                       *payment.into_mut() = retryable_payment;
+                                                       (total_amount, recipient_onion, None, onion_session_privs)
+                                               },
                                                PendingOutboundPayment::Fulfilled { .. } => {
                                                        log_error!(logger, "Payment already completed");
                                                        return
@@ -853,17 +999,7 @@ impl OutboundPayments {
                                                        log_error!(logger, "Payment already abandoned (with some HTLCs still pending)");
                                                        return
                                                },
-                                       };
-                                       if !payment.get().is_retryable_now() {
-                                               log_error!(logger, "Retries exhausted for payment id {}", &payment_id);
-                                               abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted);
-                                               return
                                        }
-                                       payment.get_mut().increment_attempts();
-                                       for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
-                                               assert!(payment.get_mut().insert(*session_priv_bytes, path));
-                                       }
-                                       res
                                },
                                hash_map::Entry::Vacant(_) => {
                                        log_error!(logger, "Payment with ID {} not found", &payment_id);
@@ -897,14 +1033,14 @@ impl OutboundPayments {
                match err {
                        PaymentSendFailure::AllFailedResendSafe(errs) => {
                                Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), logger, pending_events);
-                               self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+                               self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
                        },
                        PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => {
                                Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), logger, pending_events);
                                // Some paths were sent. Even though we failed to send the full MPP value, our recipient
                                // may misbehave and claim the funds, at which point we have to consider the payment sent,
                                // so we return `Ok()` here, ignoring any retry errors.
-                               self.retry_payment_internal(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+                               self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
                        },
                        PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. } => {
                                // This may happen if we send a payment and some paths fail, but only due to a temporary
@@ -976,7 +1112,7 @@ impl OutboundPayments {
                        }))
                }
 
-               let route = Route { paths: vec![path], payment_params: None };
+               let route = Route { paths: vec![path], route_params: None };
                let onion_session_privs = self.add_new_pending_payment(payment_hash,
                        RecipientOnionFields::spontaneous_empty(), payment_id, None, &route, None, None,
                        entropy_source, best_block_height)?;
@@ -1017,36 +1153,70 @@ impl OutboundPayments {
                keysend_preimage: Option<PaymentPreimage>, route: &Route, retry_strategy: Option<Retry>,
                payment_params: Option<PaymentParameters>, entropy_source: &ES, best_block_height: u32
        ) -> Result<Vec<[u8; 32]>, PaymentSendFailure> where ES::Target: EntropySource {
+               let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+               match pending_outbounds.entry(payment_id) {
+                       hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+                       hash_map::Entry::Vacant(entry) => {
+                               let (payment, onion_session_privs) = self.create_pending_payment(
+                                       payment_hash, recipient_onion, keysend_preimage, route, retry_strategy,
+                                       payment_params, entropy_source, best_block_height
+                               );
+                               entry.insert(payment);
+                               Ok(onion_session_privs)
+                       },
+               }
+       }
+
+       fn create_pending_payment<ES: Deref>(
+               &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+               keysend_preimage: Option<PaymentPreimage>, route: &Route, retry_strategy: Option<Retry>,
+               payment_params: Option<PaymentParameters>, entropy_source: &ES, best_block_height: u32
+       ) -> (PendingOutboundPayment, Vec<[u8; 32]>)
+       where
+               ES::Target: EntropySource,
+       {
                let mut onion_session_privs = Vec::with_capacity(route.paths.len());
                for _ in 0..route.paths.len() {
                        onion_session_privs.push(entropy_source.get_secure_random_bytes());
                }
 
+               let mut payment = PendingOutboundPayment::Retryable {
+                       retry_strategy,
+                       attempts: PaymentAttempts::new(),
+                       payment_params,
+                       session_privs: HashSet::new(),
+                       pending_amt_msat: 0,
+                       pending_fee_msat: Some(0),
+                       payment_hash,
+                       payment_secret: recipient_onion.payment_secret,
+                       payment_metadata: recipient_onion.payment_metadata,
+                       keysend_preimage,
+                       custom_tlvs: recipient_onion.custom_tlvs,
+                       starting_block_height: best_block_height,
+                       total_msat: route.get_total_amount(),
+               };
+
+               for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+                       assert!(payment.insert(*session_priv_bytes, path));
+               }
+
+               (payment, onion_session_privs)
+       }
+
+       #[allow(unused)]
+       pub(super) fn add_new_awaiting_invoice(
+               &self, payment_id: PaymentId, retry_strategy: Retry
+       ) -> Result<(), ()> {
                let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
                match pending_outbounds.entry(payment_id) {
-                       hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+                       hash_map::Entry::Occupied(_) => Err(()),
                        hash_map::Entry::Vacant(entry) => {
-                               let payment = entry.insert(PendingOutboundPayment::Retryable {
+                               entry.insert(PendingOutboundPayment::AwaitingInvoice {
+                                       timer_ticks_without_response: 0,
                                        retry_strategy,
-                                       attempts: PaymentAttempts::new(),
-                                       payment_params,
-                                       session_privs: HashSet::new(),
-                                       pending_amt_msat: 0,
-                                       pending_fee_msat: Some(0),
-                                       payment_hash,
-                                       payment_secret: recipient_onion.payment_secret,
-                                       payment_metadata: recipient_onion.payment_metadata,
-                                       keysend_preimage,
-                                       custom_tlvs: recipient_onion.custom_tlvs,
-                                       starting_block_height: best_block_height,
-                                       total_msat: route.get_total_amount(),
                                });
 
-                               for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
-                                       assert!(payment.insert(*session_priv_bytes, path));
-                               }
-
-                               Ok(onion_session_privs)
+                               Ok(())
                        },
                }
        }
@@ -1064,7 +1234,9 @@ impl OutboundPayments {
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::InvalidRoute{err: "There must be at least one path to send over".to_owned()}));
                }
-               if recipient_onion.payment_secret.is_none() && route.paths.len() > 1 {
+               if recipient_onion.payment_secret.is_none() && route.paths.len() > 1
+                       && !route.paths.iter().any(|p| p.blinded_tail.is_some())
+               {
                        return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_owned()}));
                }
                let mut total_value = 0;
@@ -1075,10 +1247,6 @@ impl OutboundPayments {
                                path_errs.push(Err(APIError::InvalidRoute{err: "Path didn't go anywhere/had bogus size".to_owned()}));
                                continue 'path_check;
                        }
-                       if path.blinded_tail.is_some() {
-                               path_errs.push(Err(APIError::InvalidRoute{err: "Sending to blinded paths isn't supported yet".to_owned()}));
-                               continue 'path_check;
-                       }
                        let dest_hop_idx = if path.blinded_tail.is_some() && path.blinded_tail.as_ref().unwrap().hops.len() > 1 {
                                usize::max_value() } else { path.hops.len() - 1 };
                        for (idx, hop) in path.hops.iter().enumerate() {
@@ -1145,9 +1313,9 @@ impl OutboundPayments {
                                results,
                                payment_id,
                                failed_paths_retry: if pending_amt_unsent != 0 {
-                                       if let Some(payment_params) = &route.payment_params {
+                                       if let Some(payment_params) = route.route_params.as_ref().map(|p| p.payment_params.clone()) {
                                                Some(RouteParameters {
-                                                       payment_params: payment_params.clone(),
+                                                       payment_params,
                                                        final_value_msat: pending_amt_unsent,
                                                })
                                        } else { None }
@@ -1256,19 +1424,19 @@ impl OutboundPayments {
                }
        }
 
-       pub(super) fn remove_stale_resolved_payments(&self,
-               pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
+       pub(super) fn remove_stale_payments(
+               &self, pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
        {
-               // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
-               // from the map. However, if we did that immediately when the last payment HTLC is claimed,
-               // this could race the user making a duplicate send_payment call and our idempotency
-               // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
-               // removal. This should be more than sufficient to ensure the idempotency of any
-               // `send_payment` calls that were made at the same time the `PaymentSent` event was being
-               // processed.
                let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
-               let pending_events = pending_events.lock().unwrap();
+               let mut pending_events = pending_events.lock().unwrap();
                pending_outbound_payments.retain(|payment_id, payment| {
+                       // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
+                       // from the map. However, if we did that immediately when the last payment HTLC is claimed,
+                       // this could race the user making a duplicate send_payment call and our idempotency
+                       // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
+                       // removal. This should be more than sufficient to ensure the idempotency of any
+                       // `send_payment` calls that were made at the same time the `PaymentSent` event was being
+                       // processed.
                        if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment {
                                let mut no_remaining_entries = session_privs.is_empty();
                                if no_remaining_entries {
@@ -1293,6 +1461,16 @@ impl OutboundPayments {
                                        *timer_ticks_without_htlcs = 0;
                                        true
                                }
+                       } else if let PendingOutboundPayment::AwaitingInvoice { timer_ticks_without_response, .. } = payment {
+                               *timer_ticks_without_response += 1;
+                               if *timer_ticks_without_response <= INVOICE_REQUEST_TIMEOUT_TICKS {
+                                       true
+                               } else {
+                                       pending_events.push_back(
+                                               (events::Event::InvoiceRequestFailed { payment_id: *payment_id }, None)
+                                       );
+                                       false
+                               }
                        } else { true }
                });
        }
@@ -1438,6 +1616,11 @@ impl OutboundPayments {
                                        }, None));
                                        payment.remove();
                                }
+                       } else if let PendingOutboundPayment::AwaitingInvoice { .. } = payment.get() {
+                               pending_events.lock().unwrap().push_back((events::Event::InvoiceRequestFailed {
+                                       payment_id,
+                               }, None));
+                               payment.remove();
                        }
                }
        }
@@ -1501,6 +1684,14 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment,
                (1, reason, option),
                (2, payment_hash, required),
        },
+       (5, AwaitingInvoice) => {
+               (0, timer_ticks_without_response, required),
+               (2, retry_strategy, required),
+       },
+       (7, InvoiceReceived) => {
+               (0, payment_hash, required),
+               (2, retry_strategy, required),
+       },
 );
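// The new variants use odd TLV type numbers (5 and 7); in LDK's upgradable-enum serialization,
// odd variant ids are, presumably, the ones an older reader can skip on downgrade rather than
// failing to deserialize (see the pending_changelog entry invoice_request_failed_downgrade.txt
// for the downgrade implications of `AwaitingInvoice`).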
 
 #[cfg(test)]
@@ -1513,10 +1704,13 @@ mod tests {
        use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
        use crate::ln::features::{ChannelFeatures, NodeFeatures};
        use crate::ln::msgs::{ErrorAction, LightningError};
-       use crate::ln::outbound_payment::{OutboundPayments, Retry, RetryableSendFailure};
+       use crate::ln::outbound_payment::{Bolt12PaymentError, INVOICE_REQUEST_TIMEOUT_TICKS, OutboundPayments, Retry, RetryableSendFailure};
+       use crate::offers::invoice::DEFAULT_RELATIVE_EXPIRY;
+       use crate::offers::offer::OfferBuilder;
+       use crate::offers::test_utils::*;
        use crate::routing::gossip::NetworkGraph;
        use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters};
-       use crate::sync::{Arc, Mutex};
+       use crate::sync::{Arc, Mutex, RwLock};
        use crate::util::errors::APIError;
        use crate::util::test_utils;
 
@@ -1555,7 +1749,7 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let logger = test_utils::TestLogger::new();
                let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(network_graph, &scorer);
                let secp_ctx = Secp256k1::new();
                let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
@@ -1565,17 +1759,14 @@ mod tests {
                                PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()),
                                0
                        ).with_expiry_time(past_expiry_time);
-               let expired_route_params = RouteParameters {
-                       payment_params,
-                       final_value_msat: 0,
-               };
+               let expired_route_params = RouteParameters::from_payment_params_and_value(payment_params, 0);
                let pending_events = Mutex::new(VecDeque::new());
                if on_retry {
                        outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(),
-                               PaymentId([0; 32]), None, &Route { paths: vec![], payment_params: None },
+                               PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None },
                                Some(Retry::Attempts(1)), Some(expired_route_params.payment_params.clone()),
                                &&keys_manager, 0).unwrap();
-                       outbound_payments.retry_payment_internal(
+                       outbound_payments.find_route_and_send_payment(
                                PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![],
                                &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger, &pending_events,
                                &|_| Ok(()));
@@ -1602,27 +1793,24 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let logger = test_utils::TestLogger::new();
                let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(network_graph, &scorer);
                let secp_ctx = Secp256k1::new();
                let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
 
                let payment_params = PaymentParameters::from_node_id(
                        PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0);
-               let route_params = RouteParameters {
-                       payment_params,
-                       final_value_msat: 0,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 0);
                router.expect_find_route(route_params.clone(),
                        Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }));
 
                let pending_events = Mutex::new(VecDeque::new());
                if on_retry {
                        outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(),
-                               PaymentId([0; 32]), None, &Route { paths: vec![], payment_params: None },
+                               PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None },
                                Some(Retry::Attempts(1)), Some(route_params.payment_params.clone()),
                                &&keys_manager, 0).unwrap();
-                       outbound_payments.retry_payment_internal(
+                       outbound_payments.find_route_and_send_payment(
                                PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![],
                                &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger, &pending_events,
                                &|_| Ok(()));
@@ -1644,7 +1832,7 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let logger = test_utils::TestLogger::new();
                let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(network_graph, &scorer);
                let secp_ctx = Secp256k1::new();
                let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
@@ -1652,10 +1840,7 @@ mod tests {
                let sender_pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let receiver_pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
                let payment_params = PaymentParameters::from_node_id(sender_pk, 0);
-               let route_params = RouteParameters {
-                       payment_params: payment_params.clone(),
-                       final_value_msat: 0,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params.clone(), 0);
                let failed_scid = 42;
                let route = Route {
                        paths: vec![Path { hops: vec![RouteHop {
@@ -1666,7 +1851,7 @@ mod tests {
                                fee_msat: 0,
                                cltv_expiry_delta: 0,
                        }], blinded_tail: None }],
-                       payment_params: Some(payment_params),
+                       route_params: Some(route_params.clone()),
                };
                router.expect_find_route(route_params.clone(), Ok(route.clone()));
                let mut route_params_w_failed_scid = route_params.clone();
@@ -1719,4 +1904,293 @@ mod tests {
                } else { panic!("Unexpected event"); }
                if let Event::PaymentFailed { .. } = events[1].0 { } else { panic!("Unexpected event"); }
        }
+
+       #[test]
+       fn removes_stale_awaiting_invoice() {
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               for _ in 0..INVOICE_REQUEST_TIMEOUT_TICKS {
+                       outbound_payments.remove_stale_payments(&pending_events);
+                       assert!(outbound_payments.has_pending_payments());
+                       assert!(pending_events.lock().unwrap().is_empty());
+               }
+
+               outbound_payments.remove_stale_payments(&pending_events);
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::InvoiceRequestFailed { payment_id }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_err());
+       }
+
+       #[test]
+       fn removes_abandoned_awaiting_invoice() {
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               outbound_payments.abandon_payment(
+                       payment_id, PaymentFailureReason::UserAbandoned, &pending_events
+               );
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::InvoiceRequestFailed { payment_id }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[cfg(feature = "std")]
+       #[test]
+       fn fails_sending_payment_for_expired_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               let created_at = now() - DEFAULT_RELATIVE_EXPIRY;
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), created_at).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Ok(()),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+
+               let payment_hash = invoice.payment_hash();
+               let reason = Some(PaymentFailureReason::PaymentExpired);
+
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[test]
+       fn fails_finding_route_for_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               router.expect_find_route(
+                       RouteParameters {
+                               payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                               final_value_msat: invoice.amount_msats(),
+                       },
+                       Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }),
+               );
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Ok(()),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+
+               let payment_hash = invoice.payment_hash();
+               let reason = Some(PaymentFailureReason::RouteNotFound);
+
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[test]
+       fn fails_paying_for_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                       final_value_msat: invoice.amount_msats(),
+               };
+               router.expect_find_route(
+                       route_params.clone(), Ok(Route { paths: vec![], route_params: Some(route_params) })
+               );
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Ok(()),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+
+               let payment_hash = invoice.payment_hash();
+               let reason = Some(PaymentFailureReason::UnexpectedError);
+
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[test]
+       fn sends_payment_for_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                       final_value_msat: invoice.amount_msats(),
+               };
+               router.expect_find_route(
+                       route_params.clone(),
+                       Ok(Route {
+                               paths: vec![
+                                       Path {
+                                               hops: vec![
+                                                       RouteHop {
+                                                               pubkey: recipient_pubkey(),
+                                                               node_features: NodeFeatures::empty(),
+                                                               short_channel_id: 42,
+                                                               channel_features: ChannelFeatures::empty(),
+                                                               fee_msat: invoice.amount_msats(),
+                                                               cltv_expiry_delta: 0,
+                                                       }
+                                               ],
+                                               blinded_tail: None,
+                                       }
+                               ],
+                               route_params: Some(route_params),
+                       })
+               );
+
+               assert!(!outbound_payments.has_pending_payments());
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Err(Bolt12PaymentError::UnexpectedInvoice),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(pending_events.lock().unwrap().is_empty());
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| Ok(())
+                       ),
+                       Ok(()),
+               );
+               assert!(outbound_payments.has_pending_payments());
+               assert!(pending_events.lock().unwrap().is_empty());
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Err(Bolt12PaymentError::DuplicateInvoice),
+               );
+               assert!(outbound_payments.has_pending_payments());
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
 }
index c266285713378648fffaebc7606ed22e154b1dcf..d88730c22db058f6f39c0a0f140adc3fdc89dfb5 100644 (file)
@@ -17,11 +17,11 @@ use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose};
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
+use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
 use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::ln::{msgs, PaymentSecret, PaymentPreimage};
+use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
 use crate::ln::msgs::ChannelMessageHandler;
-use crate::ln::outbound_payment::Retry;
+use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
 use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
 use crate::routing::scoring::ChannelUsage;
@@ -94,10 +94,7 @@ fn mpp_retry() {
 
        // Initiate the MPP payment.
        let payment_id = PaymentId(payment_hash.0);
-       let mut route_params = RouteParameters {
-               payment_params: route.payment_params.clone().unwrap(),
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = route.route_params.clone().unwrap();
 
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -257,10 +254,8 @@ fn do_test_keysend_payments(public_node: bool, with_retry: bool) {
        }
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[1].node.get_our_node_id();
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-               final_value_msat: 10000,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(
+               PaymentParameters::for_keysend(payee_pubkey, 40, false), 10000);
 
        let network_graph = nodes[0].network_graph.clone();
        let channels = nodes[0].node.list_usable_channels();
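The recurring edit in this file swaps struct-literal construction of RouteParameters (payment_params plus final_value_msat) for the new RouteParameters::from_payment_params_and_value constructor, and Route literals now carry route_params: Some(..) instead of payment_params: Some(..). A short sketch of the new construction, assuming only the APIs visible in this diff (the key, amount, and CLTV delta below are arbitrary example values):

use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use lightning::routing::router::{PaymentParameters, RouteParameters};

fn main() {
    // Any valid secp256k1 key works as the example payee.
    let secp = Secp256k1::new();
    let payee = PublicKey::from_secret_key(&secp, &SecretKey::from_slice(&[1u8; 32]).unwrap());

    // Previously: RouteParameters { payment_params, final_value_msat: 10_000 }
    let payment_params = PaymentParameters::from_node_id(payee, 40 /* final_cltv_expiry_delta */);
    let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
    assert_eq!(route_params.final_value_msat, 10_000);
}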
@@ -319,10 +314,8 @@ fn test_mpp_keysend() {
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[3].node.get_our_node_id();
        let recv_value = 15_000_000;
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, true),
-               final_value_msat: recv_value,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(
+               PaymentParameters::for_keysend(payee_pubkey, 40, true), recv_value);
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
        let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger,
@@ -531,10 +524,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let amt_msat = 1_000_000;
        let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
        let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
-       let route_params = RouteParameters {
-               payment_params: route.payment_params.clone().unwrap(),
-               final_value_msat: amt_msat,
-       };
+       let route_params = route.route_params.unwrap().clone();
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@ -621,7 +611,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
        check_added_monitors!(nodes[1], 1);
        commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
-       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
 
        if confirm_before_reload {
                let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
@@ -673,9 +663,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
                        .unwrap().lock().unwrap();
                let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
-               let mut new_config = channel.context.config();
+               let mut new_config = channel.context().config();
                new_config.forwarding_fee_base_msat += 100_000;
-               channel.context.update_config(&new_config);
+               channel.context_mut().update_config(&new_config);
                new_route.paths[0].hops[0].fee_msat += 100_000;
        }
 
@@ -923,7 +913,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
        nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -1061,7 +1051,7 @@ fn test_fulfill_restart_failure() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
        // pre-fulfill, which we do by serializing it here.
@@ -1111,10 +1101,11 @@ fn get_ldk_payment_preimage() {
        let scorer = test_utils::TestScorer::new();
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
-       let route = get_route(
-               &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
-               Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-               amt_msat, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(),
+               Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), nodes[0].logger,
+               &scorer, &(), &random_seed_bytes).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
                RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@ -1224,7 +1215,7 @@ fn failed_probe_yields_event() {
 
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
 
-       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000);
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 9_998_000);
 
        let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
@@ -1273,7 +1264,7 @@ fn onchain_failed_probe_yields_event() {
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
 
        // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
-       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 1_000);
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 1_000);
        let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
        // node[0] -- update_add_htlcs -> node[1]
@@ -1468,7 +1459,7 @@ fn test_trivial_inflight_htlc_tracking(){
        let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 
        // Send and claim the payment. Inflight HTLCs should be empty.
-       let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
+       let (_, payment_hash, _, payment_id) = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
        let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
                let mut node_0_per_peer_lock;
@@ -1478,7 +1469,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.context.get_short_channel_id().unwrap()
+                       channel_1.context().get_short_channel_id().unwrap()
                );
                assert_eq!(chan_1_used_liquidity, None);
        }
@@ -1490,14 +1481,14 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.context.get_short_channel_id().unwrap()
+                       channel_2.context().get_short_channel_id().unwrap()
                );
 
                assert_eq!(chan_2_used_liquidity, None);
        }
        let pending_payments = nodes[0].node.list_recent_payments();
        assert_eq!(pending_payments.len(), 1);
-       assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash) });
+       assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash), payment_id });
 
        // Remove fulfilled payment
        for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
@@ -1505,7 +1496,7 @@ fn test_trivial_inflight_htlc_tracking(){
        }
 
        // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
-       let (payment_preimage, payment_hash,  _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
+       let (payment_preimage, payment_hash,  _, payment_id) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
        let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
                let mut node_0_per_peer_lock;
@@ -1515,7 +1506,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.context.get_short_channel_id().unwrap()
+                       channel_1.context().get_short_channel_id().unwrap()
                );
                // First hop accounts for expected 1000 msat fee
                assert_eq!(chan_1_used_liquidity, Some(501000));
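As a quick check on the asserted amounts: the payment is 500,000 msat over two hops and the intermediate node is expected to charge a 1,000 msat forwarding fee, so the first channel carries the payment plus the fee while the second carries only the payment:

fn main() {
    let amt_msat = 500_000u64;
    let first_hop_fee_msat = 1_000u64;
    assert_eq!(amt_msat + first_hop_fee_msat, 501_000); // chan_1 used liquidity
    assert_eq!(amt_msat, 500_000);                      // chan_2 used liquidity
}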
@@ -1528,14 +1519,14 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.context.get_short_channel_id().unwrap()
+                       channel_2.context().get_short_channel_id().unwrap()
                );
 
                assert_eq!(chan_2_used_liquidity, Some(500000));
        }
        let pending_payments = nodes[0].node.list_recent_payments();
        assert_eq!(pending_payments.len(), 1);
-       assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_hash, total_msat: 500000 });
+       assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat: 500000 });
 
        // Now, let's claim the payment. This should result in the used liquidity returning `None`.
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
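With this change, list_recent_payments entries carry the PaymentId alongside the payment hash, letting callers correlate entries with the id they passed to send_payment. A sketch of consuming the two variants asserted in this test (field sets taken from the assertions above; any other variant is handled generically):

use lightning::ln::channelmanager::RecentPaymentDetails;

fn describe(payment: &RecentPaymentDetails) -> String {
    match payment {
        RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat } =>
            format!("pending {:?}: {:?}, {} msat", payment_id, payment_hash, total_msat),
        RecentPaymentDetails::Fulfilled { payment_id, payment_hash } =>
            format!("fulfilled {:?}: {:?}", payment_id, payment_hash),
        other => format!("{:?}", other),
    }
}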
@@ -1554,7 +1545,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.context.get_short_channel_id().unwrap()
+                       channel_1.context().get_short_channel_id().unwrap()
                );
                assert_eq!(chan_1_used_liquidity, None);
        }
@@ -1566,7 +1557,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.context.get_short_channel_id().unwrap()
+                       channel_2.context().get_short_channel_id().unwrap()
                );
                assert_eq!(chan_2_used_liquidity, None);
        }
@@ -1607,7 +1598,7 @@ fn test_holding_cell_inflight_htlcs() {
                let used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel.context.get_short_channel_id().unwrap()
+                       channel.context().get_short_channel_id().unwrap()
                );
 
                assert_eq!(used_liquidity, Some(2000000));
@@ -1661,15 +1652,10 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                        }])
                ]).unwrap()
                .with_bolt11_features(nodes[2].node.invoice_features()).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
-       let route = get_route(
-               &nodes[0].node.get_our_node_id(), &route_params.payment_params,
-               &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes,
-       ).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &(),
+               &random_seed_bytes).unwrap();
 
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
@@ -1703,9 +1689,9 @@ fn do_test_intercepted_payment(test: InterceptTest) {
        };
 
        // Check for unknown channel id error.
-       let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
+       let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
        assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable  {
-               err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+               err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
                        log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
 
        if test == InterceptTest::Fail {
@@ -1731,8 +1717,8 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
                let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
                assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
-                       err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
-                               log_bytes!(temp_chan_id), nodes[2].node.get_our_node_id()) });
+                       err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
+                               temp_chan_id, nodes[2].node.get_our_node_id()) });
                assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
 
                // Open the just-in-time channel so the payment can then be forwarded.
@@ -1850,10 +1836,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_route_hints(route_hints).unwrap()
                .with_bolt11_features(nodes[2].node.invoice_features()).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
@@ -1980,10 +1963,7 @@ fn do_automatic_retries(test: AutoRetry) {
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
        let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
 
        macro_rules! pass_failed_attempt_with_retry_along_path {
@@ -2199,10 +2179,7 @@ fn auto_retry_partial_failure() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
        // second (for the initial send path2 over chan_2) fails.
@@ -2234,7 +2211,7 @@ fn auto_retry_partial_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(route_params.payment_params.clone()),
+               route_params: Some(route_params.clone()),
        };
        let retry_1_route = Route {
                paths: vec![
@@ -2255,7 +2232,7 @@ fn auto_retry_partial_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(route_params.payment_params.clone()),
+               route_params: Some(route_params.clone()),
        };
        let retry_2_route = Route {
                paths: vec![
@@ -2268,19 +2245,19 @@ fn auto_retry_partial_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(route_params.payment_params.clone()),
+               route_params: Some(route_params.clone()),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
        let mut payment_params = route_params.payment_params.clone();
        payment_params.previously_failed_channels.push(chan_2_id);
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params, final_value_msat: amt_msat / 2,
-               }, Ok(retry_1_route));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2),
+               Ok(retry_1_route));
        let mut payment_params = route_params.payment_params.clone();
        payment_params.previously_failed_channels.push(chan_3_id);
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params, final_value_msat: amt_msat / 4,
-               }, Ok(retry_2_route));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4),
+               Ok(retry_2_route));
 
        // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -2416,10 +2393,7 @@ fn auto_retry_zero_attempts_send_error() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -2456,10 +2430,7 @@ fn fails_paying_after_rejected_by_payee() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2503,10 +2474,8 @@ fn retry_multi_path_single_failed_payment() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params: payment_params.clone(),
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(
+               payment_params.clone(), amt_msat);
 
        let chans = nodes[0].node.list_usable_channels();
        let mut route = Route {
@@ -2528,23 +2497,22 @@ fn retry_multi_path_single_failed_payment() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(payment_params),
+               route_params: Some(route_params.clone()),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        // On retry, split the payment across both channels.
        route.paths[0].hops[0].fee_msat = 50_000_001;
        route.paths[1].hops[0].fee_msat = 50_000_000;
-       let mut pay_params = route.payment_params.clone().unwrap();
+       let mut pay_params = route.route_params.clone().unwrap().payment_params;
        pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: pay_params,
-                       // Note that the second request here requests the amount we originally failed to send,
-                       // not the amount remaining on the full payment, which should be changed.
-                       final_value_msat: 100_000_001,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               // Note that the second request here requests the amount we originally failed to send,
+               // not the amount remaining on the full payment, which should be changed.
+               RouteParameters::from_payment_params_and_value(pay_params, 100_000_001),
+               Ok(route.clone()));
 
        {
-               let scorer = chanmon_cfgs[0].scorer.lock().unwrap();
+               let scorer = chanmon_cfgs[0].scorer.read().unwrap();
                // The initial send attempt, 2 paths
                scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
                scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
@@ -2596,10 +2564,7 @@ fn immediate_retry_on_failure() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let chans = nodes[0].node.list_usable_channels();
        let mut route = Route {
@@ -2613,7 +2578,9 @@ fn immediate_retry_on_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       100_000_001)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        // On retry, split the payment across both channels.
@@ -2623,9 +2590,9 @@ fn immediate_retry_on_failure() {
        route.paths[1].hops[0].fee_msat = 50_000_001;
        let mut pay_params = route_params.payment_params.clone();
        pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: pay_params, final_value_msat: amt_msat,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(pay_params, amt_msat),
+               Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2684,10 +2651,7 @@ fn no_extra_retries_on_back_to_back_fail() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let mut route = Route {
                paths: vec![
@@ -2722,7 +2686,9 @@ fn no_extra_retries_on_back_to_back_fail() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None }
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       100_000_000)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        let mut second_payment_params = route_params.payment_params.clone();
@@ -2730,10 +2696,9 @@ fn no_extra_retries_on_back_to_back_fail() {
        // On retry, we'll only return one path
        route.paths.remove(1);
        route.paths[0].hops[1].fee_msat = amt_msat;
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: second_payment_params,
-                       final_value_msat: amt_msat,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat),
+               Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2886,10 +2851,7 @@ fn test_simple_partial_retry() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let mut route = Route {
                paths: vec![
@@ -2924,17 +2886,18 @@ fn test_simple_partial_retry() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None }
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       100_000_000)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        let mut second_payment_params = route_params.payment_params.clone();
        second_payment_params.previously_failed_channels = vec![chan_2_scid];
        // On retry, we'll only be asked for one path (or 100k sats)
        route.paths.remove(0);
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: second_payment_params,
-                       final_value_msat: amt_msat / 2,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2),
+               Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -3052,10 +3015,7 @@ fn test_threaded_payment_retries() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let mut route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let mut route = Route {
                paths: vec![
@@ -3090,7 +3050,9 @@ fn test_threaded_payment_retries() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None }
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       amt_msat - amt_msat / 1000)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
 
@@ -3197,7 +3159,7 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint:
                nodes_0_serialized = nodes[0].node.encode();
        }
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        if persist_manager_with_payment {
                nodes_0_serialized = nodes[0].node.encode();
@@ -3303,11 +3265,10 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
        create_announced_chan_between_nodes(&nodes, 2, 3);
 
        let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
-       let mut route_params = RouteParameters {
-               payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+       let mut route_params = RouteParameters::from_payment_params_and_value(
+               PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
                        .with_bolt11_features(nodes[1].node.invoice_features()).unwrap(),
-               final_value_msat: 10_000_000,
-       };
+               10_000_000);
        let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
                None, nodes[0].node.compute_inflight_htlcs()).unwrap();
        // Make sure the route is ordered as the B->D path before C->D
@@ -3517,10 +3478,7 @@ fn test_retry_custom_tlvs() {
 
        // Initiate the payment
        let payment_id = PaymentId(payment_hash.0);
-       let mut route_params = RouteParameters {
-               payment_params: route.payment_params.clone().unwrap(),
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = route.route_params.clone().unwrap();
 
        let custom_tlvs = vec![((1 << 16) + 1, vec![0x42u8; 16])];
        let onion_fields = RecipientOnionFields::secret_only(payment_secret);
@@ -3772,10 +3730,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
-       let mut route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        // Send the MPP payment, delivering the updated commitment state to nodes[1].
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields {
index 0e4f4a13d9b30f009890713e24df11e655cf065c..1e0294073a25d2b01b470bbbbad16d9f9ab3d305 100644 (file)
@@ -20,9 +20,10 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
 use crate::sign::{KeysManager, NodeSigner, Recipient};
 use crate::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
+use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
 use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use crate::util::ser::{VecWriter, Writeable, Writer};
 use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
@@ -186,7 +187,7 @@ impl ErroringMessageHandler {
        pub fn new() -> Self {
                Self { message_queue: Mutex::new(Vec::new()) }
        }
-       fn push_error(&self, node_id: &PublicKey, channel_id: [u8; 32]) {
+       fn push_error(&self, node_id: &PublicKey, channel_id: ChannelId) {
                self.message_queue.lock().unwrap().push(MessageSendEvent::HandleError {
                        action: msgs::ErrorAction::SendErrorMessage {
                                msg: msgs::ErrorMessage { channel_id, data: "We do not support channel messages, sorry.".to_owned() },
@@ -482,7 +483,7 @@ struct Peer {
        /// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
        /// check this).
        their_features: Option<InitFeatures>,
-       their_net_address: Option<NetAddress>,
+       their_socket_address: Option<SocketAddress>,
 
        pending_outbound_buffer: LinkedList<Vec<u8>>,
        pending_outbound_buffer_first_msg_offset: usize,
@@ -854,28 +855,28 @@ impl core::fmt::Display for OptionalFromDebugger<'_> {
 /// A function used to filter out local or private addresses
 /// <https://www.iana.org./assignments/ipv4-address-space/ipv4-address-space.xhtml>
 /// <https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml>
-fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
+fn filter_addresses(ip_address: Option<SocketAddress>) -> Option<SocketAddress> {
        match ip_address{
                // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
-               Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [10, _, _, _], port: _}) => None,
                // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
-               Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [0, _, _, _], port: _}) => None,
                // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
-               Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [100, 64..=127, _, _], port: _}) => None,
                // For IPv4 range       127.0.0.0 - 127.255.255.255 (127/8)
-               Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [127, _, _, _], port: _}) => None,
                // For IPv4 range       169.254.0.0 - 169.254.255.255 (169.254/16)
-               Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [169, 254, _, _], port: _}) => None,
                // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
-               Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [172, 16..=31, _, _], port: _}) => None,
                // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
-               Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [192, 168, _, _], port: _}) => None,
                // For IPv4 range 192.88.99.0 - 192.88.99.255  (192.88.99/24)
-               Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [192, 88, 99, _], port: _}) => None,
                // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
-               Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
+               Some(SocketAddress::TcpIpV6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
                // For remaining addresses
-               Some(NetAddress::IPv6{addr: _, port: _}) => None,
+               Some(SocketAddress::TcpIpV6{addr: _, port: _}) => None,
                Some(..) => ip_address,
                None => None,
        }
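This hunk only renames NetAddress to SocketAddress (and the IPv4/IPv6 variants to TcpIpV4/TcpIpV6); the filtering logic itself is unchanged. Since filter_addresses is private to the peer handler, the runnable sketch below mirrors a few of its IPv4 arms with a local stand-in type, purely to illustrate which ranges are dropped from announcements:

// Local stand-in for SocketAddress::TcpIpV4, for illustration only.
#[allow(dead_code)]
enum Addr { TcpIpV4 { addr: [u8; 4], port: u16 } }

fn keep_for_announcement(addr: Addr) -> bool {
    match addr {
        Addr::TcpIpV4 { addr: [10, ..], .. } => false,           // 10/8 (private)
        Addr::TcpIpV4 { addr: [127, ..], .. } => false,          // 127/8 (loopback)
        Addr::TcpIpV4 { addr: [169, 254, ..], .. } => false,     // 169.254/16 (link-local)
        Addr::TcpIpV4 { addr: [172, 16..=31, ..], .. } => false, // 172.16/12 (private)
        Addr::TcpIpV4 { addr: [192, 168, ..], .. } => false,     // 192.168/16 (private)
        _ => true,                                                // treated as announceable
    }
}

fn main() {
    assert!(!keep_for_announcement(Addr::TcpIpV4 { addr: [192, 168, 1, 10], port: 9735 }));
    assert!(keep_for_announcement(Addr::TcpIpV4 { addr: [203, 0, 113, 7], port: 9735 }));
}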
@@ -932,14 +933,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        ///
        /// The returned `Option`s will only be `Some` if an address had been previously given via
        /// [`Self::new_outbound_connection`] or [`Self::new_inbound_connection`].
-       pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<NetAddress>)> {
+       pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<SocketAddress>)> {
                let peers = self.peers.read().unwrap();
                peers.values().filter_map(|peer_mutex| {
                        let p = peer_mutex.lock().unwrap();
                        if !p.handshake_complete() {
                                return None;
                        }
-                       Some((p.their_node_id.unwrap().0, p.their_net_address.clone()))
+                       Some((p.their_node_id.unwrap().0, p.their_socket_address.clone()))
                }).collect()
        }
 
@@ -972,7 +973,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// [`socket_disconnected`].
        ///
        /// [`socket_disconnected`]: PeerManager::socket_disconnected
-       pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
+       pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<SocketAddress>) -> Result<Vec<u8>, PeerHandleError> {
                let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
                let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
                let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
@@ -988,7 +989,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        channel_encryptor: peer_encryptor,
                                        their_node_id: None,
                                        their_features: None,
-                                       their_net_address: remote_network_address,
+                                       their_socket_address: remote_network_address,
 
                                        pending_outbound_buffer: LinkedList::new(),
                                        pending_outbound_buffer_first_msg_offset: 0,
@@ -1029,7 +1030,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// [`socket_disconnected`].
        ///
        /// [`socket_disconnected`]: PeerManager::socket_disconnected
-       pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
+       pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<SocketAddress>) -> Result<(), PeerHandleError> {
                let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.node_signer);
                let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
 
@@ -1044,7 +1045,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        channel_encryptor: peer_encryptor,
                                        their_node_id: None,
                                        their_features: None,
-                                       their_net_address: remote_network_address,
+                                       their_socket_address: remote_network_address,
 
                                        pending_outbound_buffer: LinkedList::new(),
                                        pending_outbound_buffer_first_msg_offset: 0,
@@ -1367,7 +1368,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                insert_node_id!();
                                                                let features = self.init_features(&their_node_id);
                                                                let networks = self.message_handler.chan_handler.get_genesis_hashes();
-                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
                                                        },
@@ -1380,7 +1381,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                insert_node_id!();
                                                                let features = self.init_features(&their_node_id);
                                                                let networks = self.message_handler.chan_handler.get_genesis_hashes();
-                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
                                                        },
@@ -1420,13 +1421,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                }
                                                                                                (msgs::DecodeError::UnsupportedCompression, _) => {
                                                                                                        log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
-                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() });
+                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() });
                                                                                                        continue;
                                                                                                }
                                                                                                (_, Some(ty)) if is_gossip_msg(ty) => {
                                                                                                        log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage {
-                                                                                                               channel_id: [0; 32],
+                                                                                                               channel_id: ChannelId::new_zero(),
                                                                                                                data: format!("Unreadable/bogus gossip message of type {}", ty),
                                                                                                        });
                                                                                                        continue;
@@ -1592,7 +1593,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        wire::Message::Error(msg) => {
                                log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
                                self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
-                               if msg.channel_id == [0; 32] {
+                               if msg.channel_id.is_zero() {
                                        return Err(PeerHandleError { }.into());
                                }
                        },
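The diff migrates channel ids from raw [u8; 32] arrays to the dedicated ChannelId type imported at the top of this file (crate::ln::ChannelId). Going only by the calls visible here (from_bytes, new_zero, is_zero, and Display-based logging replacing log_bytes!), a minimal usage sketch from outside the crate:

use lightning::ln::ChannelId;

fn main() {
    let id = ChannelId::from_bytes([42; 32]);
    assert!(!id.is_zero());
    assert!(ChannelId::new_zero().is_zero());
    // Channel ids are now printed via Display ("{}") rather than log_bytes!(..).
    println!("channel {}", id);
}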
@@ -1913,31 +1914,31 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id),
+                                                                       &msg.temporary_channel_id,
                                                                        log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
                                                        // TODO: If the peer is gone we should generate a DiscardFunding event
                                                        // indicating to the wallet that they should just throw away this funding transaction
@@ -1946,73 +1947,73 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
@@ -2021,7 +2022,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        update_add_htlcs.len(),
                                                                        update_fulfill_htlcs.len(),
                                                                        update_fail_htlcs.len(),
-                                                                       log_bytes!(commitment_signed.channel_id));
+                                                                       &commitment_signed.channel_id);
                                                        let mut peer = get_peer_for_forwarding!(node_id);
                                                        for msg in update_add_htlcs {
                                                                self.enqueue_message(&mut *peer, msg);
@@ -2043,25 +2044,25 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
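The log-site changes throughout this hunk drop `log_bytes!` because `channel_id` is now the new `ChannelId` type (imported in the test module further down), which presumably formats as hex via `Display` and can therefore be handed to the log macros directly. A minimal sketch under that assumption, using only constructors that appear elsewhere in this diff:

    // Assumption: ChannelId wraps [u8; 32] and implements Display as hex, so it can be
    // passed to the logging macros without log_bytes!.
    let channel_id = ChannelId::from_bytes([42; 32]);
    log_debug!(logger, "channel {}", &channel_id);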
                                                MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
@@ -2398,7 +2399,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        // be absurd. We ensure this by checking that at least 100 (our stated public contract on when
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
-       const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+       const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
        #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
@@ -2420,7 +2421,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// Panics if `addresses` is absurdly large (more than 100).
        ///
        /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
-       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
+       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<SocketAddress>) {
                if addresses.len() > 100 {
                        panic!("More than half the message size was taken up by public addresses!");
                }
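A minimal usage sketch for the updated signature (the `peer_manager` value is assumed to be an initialized `PeerManager`, as set up in the tests below); per the check above, passing more than 100 addresses panics:

    // Sketch: announce one public address; rgb and alias are zeroed for brevity.
    let addresses = vec![SocketAddress::TcpIpV4 { addr: [203, 0, 113, 7], port: 9735 }];
    peer_manager.broadcast_node_announcement([0; 3], [0; 32], addresses);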
@@ -2482,11 +2483,12 @@ mod tests {
        use crate::sign::{NodeSigner, Recipient};
        use crate::events;
        use crate::io;
+       use crate::ln::ChannelId;
        use crate::ln::features::{InitFeatures, NodeFeatures};
        use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
        use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
        use crate::ln::{msgs, wire};
-       use crate::ln::msgs::{LightningError, NetAddress};
+       use crate::ln::msgs::{LightningError, SocketAddress};
        use crate::util::test_utils;
 
        use bitcoin::Network;
@@ -2645,13 +2647,13 @@ mod tests {
                        fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                        disconnect: Arc::new(AtomicBool::new(false)),
                };
-               let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+               let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
                let mut fd_b = FileDescriptor {
                        fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                        disconnect: Arc::new(AtomicBool::new(false)),
                };
-               let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+               let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
@@ -2696,12 +2698,12 @@ mod tests {
                                                fd: $id  + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                                disconnect: Arc::new(AtomicBool::new(false)),
                                        };
-                                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                                       let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                                        let mut fd_b = FileDescriptor {
                                                fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                                disconnect: Arc::new(AtomicBool::new(false)),
                                        };
-                                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                                       let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                                        let initial_data = peers[1].new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                                        peers[0].new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                                        if peers[0].read_event(&mut fd_a, &initial_data).is_err() { break; }
@@ -2721,7 +2723,7 @@ mod tests {
                                                        .push(crate::events::MessageSendEvent::SendShutdown {
                                                                node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(),
                                                                msg: msgs::Shutdown {
-                                                                       channel_id: [0; 32],
+                                                                       channel_id: ChannelId::new_zero(),
                                                                        scriptpubkey: bitcoin::Script::new(),
                                                                },
                                                        });
@@ -2729,7 +2731,7 @@ mod tests {
                                                        .push(crate::events::MessageSendEvent::SendShutdown {
                                                                node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(),
                                                                msg: msgs::Shutdown {
-                                                                       channel_id: [0; 32],
+                                                                       channel_id: ChannelId::new_zero(),
                                                                        scriptpubkey: bitcoin::Script::new(),
                                                                },
                                                        });
@@ -2768,12 +2770,12 @@ mod tests {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                       let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                        let mut fd_b = FileDescriptor {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                       let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                        let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                        peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                        assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
@@ -2804,12 +2806,12 @@ mod tests {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                       let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                        let mut fd_b = FileDescriptor {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                       let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                        let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                        peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                        assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
@@ -2858,7 +2860,7 @@ mod tests {
 
                let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap();
 
-               let msg = msgs::Shutdown { channel_id: [42; 32], scriptpubkey: bitcoin::Script::new() };
+               let msg = msgs::Shutdown { channel_id: ChannelId::from_bytes([42; 32]), scriptpubkey: bitcoin::Script::new() };
                a_chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::SendShutdown {
                        node_id: their_id, msg: msg.clone()
                });
@@ -2886,7 +2888,7 @@ mod tests {
                        fd: 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
                        disconnect: Arc::new(AtomicBool::new(false)),
                };
-               let addr_dup = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1003};
+               let addr_dup = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1003};
                let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap();
                peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap();
 
@@ -3024,91 +3026,91 @@ mod tests {
                // Tests the filter_addresses function.
 
                // For (10/8)
-               let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 255, 201], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [10, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (0/8)
-               let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 255, 187], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [0, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (100.64/10)
-               let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [100, 64, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [100, 78, 255, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [100, 127, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (127/8)
-               let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [127, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [127, 65, 73, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [127, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (169.254/16)
-               let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 221, 101], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (172.16/12)
-               let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [172, 16, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [172, 27, 101, 23], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [172, 31, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (192.168/16)
-               let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 205, 159], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (192.88.99/24)
-               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 140], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For other IPv4 addresses
-               let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [188, 255, 99, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [123, 8, 129, 14], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [2, 88, 9, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
 
                // For (2000::/3)
-               let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
 
                // For other IPv6 addresses
-               let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (None)
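Taken together, these cases assert that filter_addresses drops anything that is not globally routable before it is used. A conceptual approximation of the rule they encode (a sketch, not the actual implementation):

    // Approximation of the predicate the cases above exercise.
    fn is_globally_routable(addr: &SocketAddress) -> bool {
        match addr {
            SocketAddress::TcpIpV4 { addr, .. } =>
                addr[0] != 0 && addr[0] != 10 && addr[0] != 127
                && !(addr[0] == 100 && (addr[1] & 0b1100_0000) == 64)   // 100.64/10
                && !(addr[0] == 169 && addr[1] == 254)                  // 169.254/16
                && !(addr[0] == 172 && addr[1] >= 16 && addr[1] <= 31)  // 172.16/12
                && !(addr[0] == 192 && addr[1] == 168)                  // 192.168/16
                && !(addr[0] == 192 && addr[1] == 88 && addr[2] == 99), // 192.88.99/24
            SocketAddress::TcpIpV6 { addr, .. } => (addr[0] & 0xe0) == 0x20, // 2000::/3
            _ => true, // non-IP variants are out of scope for these cases
        }
    }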
index c452630e184529969dd6f45a5f33fd5f9fd4b0f0..c3a428ae0147d85ceb8ddf2bef58cfc75a9ff34c 100644 (file)
@@ -18,7 +18,7 @@ use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, Mes
 use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -332,8 +332,8 @@ fn test_simple_manager_serialize_deserialize() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
-       let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (our_payment_preimage, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
@@ -371,7 +371,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                node_0_stale_monitors_serialized.push(writer.0);
        }
 
-       let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
+       let (our_payment_preimage, ..) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
 
        // Serialize the ChannelManager here, but the monitor we keep up-to-date
        let nodes_0_serialized = nodes[0].node.encode();
@@ -399,7 +399,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_stale_monitors = Vec::new();
        for serialized in node_0_stale_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_stale_monitors.push(monitor);
        }
@@ -407,7 +407,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_monitors = Vec::new();
        for serialized in node_0_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_monitors.push(monitor);
        }
@@ -1063,7 +1063,7 @@ fn removed_payment_no_manager_persistence() {
        let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
-       let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
        let node_encoded = nodes[1].node.encode();
 
index 35399b7dae4b63b9ae50e3a8151804fe16792219..7c57b553068ada64d561f805a53013569254d68b 100644 (file)
@@ -55,7 +55,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
        connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
        // Provide preimage to node 2 by claiming payment
        nodes[2].node.claim_funds(our_payment_preimage);
@@ -423,8 +423,8 @@ fn test_set_outpoints_partial_claiming() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
        let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
index f975e18412967bc07a6058572091cf0377eead1f..f105edc19c05be35d1dceb1480681b35651172f7 100644 (file)
@@ -11,9 +11,9 @@
 
 use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::transaction::OutPoint;
-use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::events::{MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
-use crate::routing::router::{PaymentParameters, get_route};
+use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
 use crate::ln::script::ShutdownScript;
@@ -122,7 +122,7 @@ fn expect_channel_shutdown_state_with_htlc() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let _chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
        expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
@@ -209,7 +209,7 @@ fn test_lnd_bug_6039() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+       let (payment_preimage, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -299,7 +299,7 @@ fn updates_shutdown_wait() {
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
-       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -313,9 +313,14 @@ fn updates_shutdown_wait() {
        let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
 
        let payment_params_1 = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
-       let route_1 = get_route(&nodes[0].node.get_our_node_id(), &payment_params_1, &nodes[0].network_graph.read_only(), None, 100000, &logger, &scorer, &(), &random_seed_bytes).unwrap();
-       let payment_params_2 = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
-       let route_2 = get_route(&nodes[1].node.get_our_node_id(), &payment_params_2, &nodes[1].network_graph.read_only(), None, 100000, &logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params_1, 100_000);
+       let route_1 = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(), None, &logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let payment_params_2 = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(),
+               TEST_FINAL_CLTV).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params_2, 100_000);
+       let route_2 = get_route(&nodes[1].node.get_our_node_id(), &route_params,
+               &nodes[1].network_graph.read_only(), None, &logger, &scorer, &(), &random_seed_bytes).unwrap();
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route_1, payment_hash,
                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
                ), true, APIError::ChannelUnavailable {..}, {});
@@ -455,7 +460,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        nodes[1].node.close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -1057,7 +1062,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                {
                        let mut node_0_per_peer_lock;
                        let mut node_0_peer_state_lock;
-                       get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context.closing_fee_limits.as_mut().unwrap().1 *= 10;
+                       get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10;
                }
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
                let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
index 75a844cd117abe5d956ddcbdf7efde5bff44a769..908d2d4bee6d2f6b9091ae0d62ce7e650cbfe754 100644 (file)
@@ -110,6 +110,7 @@ use core::time::Duration;
 use crate::io;
 use crate::blinded_path::BlindedPath;
 use crate::ln::PaymentHash;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures, InvoiceRequestFeatures, OfferFeatures};
 use crate::ln::inbound_payment::ExpandedKey;
 use crate::ln::msgs::DecodeError;
@@ -128,7 +129,7 @@ use crate::prelude::*;
 #[cfg(feature = "std")]
 use std::time::SystemTime;
 
-const DEFAULT_RELATIVE_EXPIRY: Duration = Duration::from_secs(7200);
+pub(crate) const DEFAULT_RELATIVE_EXPIRY: Duration = Duration::from_secs(7200);
 
 /// Tag for the hash function used when signing a [`Bolt12Invoice`]'s merkle root.
 pub const SIGNATURE_TAG: &'static str = concat!("lightning", "invoice", "signature");
@@ -695,10 +696,11 @@ impl Bolt12Invoice {
                merkle::message_digest(SIGNATURE_TAG, &self.bytes).as_ref().clone()
        }
 
-       /// Verifies that the invoice was for a request or refund created using the given key.
+       /// Verifies that the invoice was for a request or refund created using the given key. Returns
+       /// the associated [`PaymentId`] to use when sending the payment.
        pub fn verify<T: secp256k1::Signing>(
                &self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
-       ) -> bool {
+       ) -> Result<PaymentId, ()> {
                self.contents.verify(TlvStream::new(&self.bytes), key, secp_ctx)
        }
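Since verify now returns a Result rather than a bool, a payer-side caller adapts roughly as follows (a sketch; only the signature comes from the change above, the surrounding handling is an assumption):

    // Sketch: the Ok value is the PaymentId the invoice request or refund was created under.
    match invoice.verify(&expanded_key, &secp_ctx) {
        Ok(payment_id) => { /* invoice matches our request; pay it under payment_id */ },
        Err(()) => { /* metadata did not verify; ignore the invoice */ },
    }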
 
@@ -947,7 +949,7 @@ impl InvoiceContents {
 
        fn verify<T: secp256k1::Signing>(
                &self, tlv_stream: TlvStream<'_>, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
-       ) -> bool {
+       ) -> Result<PaymentId, ()> {
                let offer_records = tlv_stream.clone().range(OFFER_TYPES);
                let invreq_records = tlv_stream.range(INVOICE_REQUEST_TYPES).filter(|record| {
                        match record.r#type {
@@ -967,10 +969,7 @@ impl InvoiceContents {
                        },
                };
 
-               match signer::verify_metadata(metadata, key, iv_bytes, payer_id, tlv_stream, secp_ctx) {
-                       Ok(_) => true,
-                       Err(()) => false,
-               }
+               signer::verify_payer_metadata(metadata, key, iv_bytes, payer_id, tlv_stream, secp_ctx)
        }
 
        fn derives_keys(&self) -> bool {
@@ -1642,36 +1641,31 @@ mod tests {
                        .build().unwrap()
                        .sign(payer_sign).unwrap();
 
-               if let Err(e) = invoice_request
-                       .verify_and_respond_using_derived_keys_no_std(
-                               payment_paths(), payment_hash(), now(), &expanded_key, &secp_ctx
-                       )
-                       .unwrap()
+               if let Err(e) = invoice_request.clone()
+                       .verify(&expanded_key, &secp_ctx).unwrap()
+                       .respond_using_derived_keys_no_std(payment_paths(), payment_hash(), now()).unwrap()
                        .build_and_sign(&secp_ctx)
                {
                        panic!("error building invoice: {:?}", e);
                }
 
                let expanded_key = ExpandedKey::new(&KeyMaterial([41; 32]));
-               match invoice_request.verify_and_respond_using_derived_keys_no_std(
-                       payment_paths(), payment_hash(), now(), &expanded_key, &secp_ctx
-               ) {
-                       Ok(_) => panic!("expected error"),
-                       Err(e) => assert_eq!(e, Bolt12SemanticError::InvalidMetadata),
-               }
+               assert!(invoice_request.verify(&expanded_key, &secp_ctx).is_err());
 
                let desc = "foo".to_string();
                let offer = OfferBuilder
                        ::deriving_signing_pubkey(desc, node_id, &expanded_key, &entropy, &secp_ctx)
                        .amount_msats(1000)
+                       // Omit the path so that node_id is used for the signing pubkey instead of deriving
                        .build().unwrap();
                let invoice_request = offer.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
                        .build().unwrap()
                        .sign(payer_sign).unwrap();
 
-               match invoice_request.verify_and_respond_using_derived_keys_no_std(
-                       payment_paths(), payment_hash(), now(), &expanded_key, &secp_ctx
-               ) {
+               match invoice_request
+                       .verify(&expanded_key, &secp_ctx).unwrap()
+                       .respond_using_derived_keys_no_std(payment_paths(), payment_hash(), now())
+               {
                        Ok(_) => panic!("expected error"),
                        Err(e) => assert_eq!(e, Bolt12SemanticError::InvalidMetadata),
                }
index 03af068d1d61912738f23ac71ff3c91079161ffe..fb0b0205bd689e1356b65ae5f7c79690479a55f5 100644 (file)
@@ -64,6 +64,7 @@ use crate::sign::EntropySource;
 use crate::io;
 use crate::blinded_path::BlindedPath;
 use crate::ln::PaymentHash;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::InvoiceRequestFeatures;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::ln::msgs::DecodeError;
@@ -128,10 +129,12 @@ impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerI
        }
 
        pub(super) fn deriving_metadata<ES: Deref>(
-               offer: &'a Offer, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES
+               offer: &'a Offer, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
+               payment_id: PaymentId,
        ) -> Self where ES::Target: EntropySource {
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let payment_id = Some(payment_id);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, payment_id);
                let metadata = Metadata::Derived(derivation_material);
                Self {
                        offer,
@@ -145,10 +148,12 @@ impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerI
 
 impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T> {
        pub(super) fn deriving_payer_id<ES: Deref>(
-               offer: &'a Offer, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>
+               offer: &'a Offer, expanded_key: &ExpandedKey, entropy_source: ES,
+               secp_ctx: &'b Secp256k1<T>, payment_id: PaymentId
        ) -> Self where ES::Target: EntropySource {
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let payment_id = Some(payment_id);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, payment_id);
                let metadata = Metadata::DerivedSigningPubkey(derivation_material);
                Self {
                        offer,
@@ -259,7 +264,7 @@ impl<'a, 'b, P: PayerIdStrategy, T: secp256k1::Signing> InvoiceRequestBuilder<'a
                        let mut tlv_stream = self.invoice_request.as_tlv_stream();
                        debug_assert!(tlv_stream.2.payer_id.is_none());
                        tlv_stream.0.metadata = None;
-                       if !metadata.derives_keys() {
+                       if !metadata.derives_payer_keys() {
                                tlv_stream.2.payer_id = self.payer_id.as_ref();
                        }
 
@@ -424,6 +429,24 @@ pub struct InvoiceRequest {
        signature: Signature,
 }
 
+/// An [`InvoiceRequest`] that has been verified by [`InvoiceRequest::verify`] and exposes different
+/// ways to respond depending on whether the signing keys were derived.
+#[derive(Clone, Debug)]
+pub struct VerifiedInvoiceRequest {
+       /// The verified request.
+       inner: InvoiceRequest,
+
+       /// Keys used for signing a [`Bolt12Invoice`] if they can be derived.
+       ///
+       /// If `Some`, must call [`respond_using_derived_keys`] when responding. Otherwise, call
+       /// [`respond_with`].
+       ///
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+       /// [`respond_using_derived_keys`]: Self::respond_using_derived_keys
+       /// [`respond_with`]: Self::respond_with
+       pub keys: Option<KeyPair>,
+}
+
 /// The contents of an [`InvoiceRequest`], which may be shared with an [`Bolt12Invoice`].
 ///
 /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
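A sketch of the recipient-side flow these types enable, assuming an offer built with OfferBuilder::deriving_signing_pubkey and using unwraps for brevity as the tests do (payment_paths, payment_hash, now and recipient_sign are the usual test helpers):

    // Verify first, then pick the response path based on whether signing keys were derived.
    let verified = invoice_request.verify(&expanded_key, &secp_ctx).unwrap();
    let invoice = if verified.keys.is_some() {
        verified.respond_using_derived_keys_no_std(payment_paths(), payment_hash(), now()).unwrap()
            .build_and_sign(&secp_ctx).unwrap()
    } else {
        verified.respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
            .build().unwrap()
            .sign(recipient_sign).unwrap()
    };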
@@ -542,9 +565,15 @@ impl InvoiceRequest {
        ///
        /// Errors if the request contains unknown required features.
        ///
+       /// # Note
+       ///
+       /// If the originating [`Offer`] was created using [`OfferBuilder::deriving_signing_pubkey`],
+       /// then use [`InvoiceRequest::verify`] and [`VerifiedInvoiceRequest`] methods instead.
+       ///
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Bolt12Invoice::created_at`]: crate::offers::invoice::Bolt12Invoice::created_at
+       /// [`OfferBuilder::deriving_signing_pubkey`]: crate::offers::offer::OfferBuilder::deriving_signing_pubkey
        pub fn respond_with_no_std(
                &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
                created_at: core::time::Duration
@@ -556,6 +585,63 @@ impl InvoiceRequest {
                InvoiceBuilder::for_offer(self, payment_paths, created_at, payment_hash)
        }
 
+       /// Verifies that the request was for an offer created using the given key. Returns the verified
+       /// request which contains the derived keys needed to sign a [`Bolt12Invoice`] for the request
+       /// if they could be extracted from the metadata.
+       ///
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+       pub fn verify<T: secp256k1::Signing>(
+               self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+       ) -> Result<VerifiedInvoiceRequest, ()> {
+               let keys = self.contents.inner.offer.verify(&self.bytes, key, secp_ctx)?;
+               Ok(VerifiedInvoiceRequest {
+                       inner: self,
+                       keys,
+               })
+       }
+
+       #[cfg(test)]
+       fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
+               let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream) =
+                       self.contents.as_tlv_stream();
+               let signature_tlv_stream = SignatureTlvStreamRef {
+                       signature: Some(&self.signature),
+               };
+               (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream, signature_tlv_stream)
+       }
+}
+
+impl VerifiedInvoiceRequest {
+       offer_accessors!(self, self.inner.contents.inner.offer);
+       invoice_request_accessors!(self, self.inner.contents);
+
+       /// Creates an [`InvoiceBuilder`] for the request with the given required fields and using the
+       /// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
+       ///
+       /// See [`InvoiceRequest::respond_with_no_std`] for further details.
+       ///
+       /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+       ///
+       /// [`Duration`]: core::time::Duration
+       #[cfg(feature = "std")]
+       pub fn respond_with(
+               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
+       ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
+               self.inner.respond_with(payment_paths, payment_hash)
+       }
+
+       /// Creates an [`InvoiceBuilder`] for the request with the given required fields.
+       ///
+       /// See [`InvoiceRequest::respond_with_no_std`] for further details.
+       ///
+       /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+       pub fn respond_with_no_std(
+               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+               created_at: core::time::Duration
+       ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
+               self.inner.respond_with_no_std(payment_paths, payment_hash, created_at)
+       }
+
        /// Creates an [`InvoiceBuilder`] for the request using the given required fields and that uses
        /// derived signing keys from the originating [`Offer`] to sign the [`Bolt12Invoice`]. Must use
        /// the same [`ExpandedKey`] as the one used to create the offer.
@@ -566,17 +652,14 @@ impl InvoiceRequest {
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        #[cfg(feature = "std")]
-       pub fn verify_and_respond_using_derived_keys<T: secp256k1::Signing>(
-               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
-               expanded_key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+       pub fn respond_using_derived_keys(
+               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
        ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError> {
                let created_at = std::time::SystemTime::now()
                        .duration_since(std::time::SystemTime::UNIX_EPOCH)
                        .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
 
-               self.verify_and_respond_using_derived_keys_no_std(
-                       payment_paths, payment_hash, created_at, expanded_key, secp_ctx
-               )
+               self.respond_using_derived_keys_no_std(payment_paths, payment_hash, created_at)
        }
 
        /// Creates an [`InvoiceBuilder`] for the request using the given required fields and that uses
@@ -588,42 +671,22 @@ impl InvoiceRequest {
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-       pub fn verify_and_respond_using_derived_keys_no_std<T: secp256k1::Signing>(
+       pub fn respond_using_derived_keys_no_std(
                &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
-               created_at: core::time::Duration, expanded_key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+               created_at: core::time::Duration
        ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError> {
-               if self.invoice_request_features().requires_unknown_bits() {
+               if self.inner.invoice_request_features().requires_unknown_bits() {
                        return Err(Bolt12SemanticError::UnknownRequiredFeatures);
                }
 
-               let keys = match self.verify(expanded_key, secp_ctx) {
-                       Err(()) => return Err(Bolt12SemanticError::InvalidMetadata),
-                       Ok(None) => return Err(Bolt12SemanticError::InvalidMetadata),
-                       Ok(Some(keys)) => keys,
+               let keys = match self.keys {
+                       None => return Err(Bolt12SemanticError::InvalidMetadata),
+                       Some(keys) => keys,
                };
 
-               InvoiceBuilder::for_offer_using_keys(self, payment_paths, created_at, payment_hash, keys)
-       }
-
-       /// Verifies that the request was for an offer created using the given key. Returns the derived
-       /// keys need to sign an [`Bolt12Invoice`] for the request if they could be extracted from the
-       /// metadata.
-       ///
-       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-       pub fn verify<T: secp256k1::Signing>(
-               &self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
-       ) -> Result<Option<KeyPair>, ()> {
-               self.contents.inner.offer.verify(&self.bytes, key, secp_ctx)
-       }
-
-       #[cfg(test)]
-       fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
-               let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream) =
-                       self.contents.as_tlv_stream();
-               let signature_tlv_stream = SignatureTlvStreamRef {
-                       signature: Some(&self.signature),
-               };
-               (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream, signature_tlv_stream)
+               InvoiceBuilder::for_offer_using_keys(
+                       &self.inner, payment_paths, created_at, payment_hash, keys
+               )
        }
 }
 
@@ -633,7 +696,7 @@ impl InvoiceRequestContents {
        }
 
        pub(super) fn derives_keys(&self) -> bool {
-               self.inner.payer.0.derives_keys()
+               self.inner.payer.0.derives_payer_keys()
        }
 
        pub(super) fn chain(&self) -> ChainHash {
@@ -866,6 +929,7 @@ mod tests {
        #[cfg(feature = "std")]
        use core::time::Duration;
        use crate::sign::KeyMaterial;
+       use crate::ln::channelmanager::PaymentId;
        use crate::ln::features::{InvoiceRequestFeatures, OfferFeatures};
        use crate::ln::inbound_payment::ExpandedKey;
        use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -1011,12 +1075,13 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let offer = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .amount_msats(1000)
                        .build().unwrap();
                let invoice_request = offer
-                       .request_invoice_deriving_metadata(payer_id, &expanded_key, &entropy)
+                       .request_invoice_deriving_metadata(payer_id, &expanded_key, &entropy, payment_id)
                        .unwrap()
                        .build().unwrap()
                        .sign(payer_sign).unwrap();
@@ -1026,7 +1091,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                // Fails verification with altered fields
                let (
@@ -1049,7 +1117,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered metadata
                let (
@@ -1072,7 +1140,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
@@ -1080,12 +1148,13 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let offer = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .amount_msats(1000)
                        .build().unwrap();
                let invoice_request = offer
-                       .request_invoice_deriving_payer_id(&expanded_key, &entropy, &secp_ctx)
+                       .request_invoice_deriving_payer_id(&expanded_key, &entropy, &secp_ctx, payment_id)
                        .unwrap()
                        .build_and_sign()
                        .unwrap();
@@ -1094,7 +1163,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                // Fails verification with altered fields
                let (
@@ -1117,7 +1189,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered payer id
                let (
@@ -1140,7 +1212,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
index c62702711c604d79047334c3f79b7217d7883b52..3593b14f1a81185c92536e382877bb0079f87051 100644 (file)
@@ -22,7 +22,6 @@ pub mod merkle;
 pub mod parse;
 mod payer;
 pub mod refund;
-#[allow(unused)]
 pub(crate) mod signer;
 #[cfg(test)]
-mod test_utils;
+pub(crate) mod test_utils;
index f6aa354b9e4f3d6c9efd2aee4ac47a04c8741dc0..e0bc63e8b2b8109e0b9243a1a32ecfb0a5a3d073 100644 (file)
@@ -77,6 +77,7 @@ use core::time::Duration;
 use crate::sign::EntropySource;
 use crate::io;
 use crate::blinded_path::BlindedPath;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::OfferFeatures;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::ln::msgs::MAX_VALUE_MSAT;
@@ -169,7 +170,7 @@ impl<'a, T: secp256k1::Signing> OfferBuilder<'a, DerivedMetadata, T> {
                secp_ctx: &'a Secp256k1<T>
        ) -> Self where ES::Target: EntropySource {
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, None);
                let metadata = Metadata::DerivedSigningPubkey(derivation_material);
                OfferBuilder {
                        offer: OfferContents {
@@ -283,7 +284,7 @@ impl<'a, M: MetadataStrategy, T: secp256k1::Signing> OfferBuilder<'a, M, T> {
                                let mut tlv_stream = self.offer.as_tlv_stream();
                                debug_assert_eq!(tlv_stream.metadata, None);
                                tlv_stream.metadata = None;
-                               if metadata.derives_keys() {
+                               if metadata.derives_recipient_keys() {
                                        tlv_stream.node_id = None;
                                }
 
@@ -454,10 +455,12 @@ impl Offer {
 
        /// Similar to [`Offer::request_invoice`] except it:
        /// - derives the [`InvoiceRequest::payer_id`] such that a different key can be used for each
-       ///   request, and
-       /// - sets the [`InvoiceRequest::payer_metadata`] when [`InvoiceRequestBuilder::build`] is
-       ///   called such that it can be used by [`Bolt12Invoice::verify`] to determine if the invoice
-       ///   was requested using a base [`ExpandedKey`] from which the payer id was derived.
+       ///   request,
+       /// - sets [`InvoiceRequest::payer_metadata`] when [`InvoiceRequestBuilder::build`] is called
+       ///   such that it can be used by [`Bolt12Invoice::verify`] to determine if the invoice was
+       ///   requested using a base [`ExpandedKey`] from which the payer id was derived, and
+       /// - includes the [`PaymentId`] encrypted in [`InvoiceRequest::payer_metadata`] so that it can
+       ///   be used when sending the payment for the requested invoice.
        ///
        /// Useful to protect the sender's privacy.
        ///
@@ -468,7 +471,8 @@ impl Offer {
        /// [`Bolt12Invoice::verify`]: crate::offers::invoice::Bolt12Invoice::verify
        /// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
        pub fn request_invoice_deriving_payer_id<'a, 'b, ES: Deref, T: secp256k1::Signing>(
-               &'a self, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>
+               &'a self, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>,
+               payment_id: PaymentId
        ) -> Result<InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T>, Bolt12SemanticError>
        where
                ES::Target: EntropySource,
@@ -477,7 +481,9 @@ impl Offer {
                        return Err(Bolt12SemanticError::UnknownRequiredFeatures);
                }
 
-               Ok(InvoiceRequestBuilder::deriving_payer_id(self, expanded_key, entropy_source, secp_ctx))
+               Ok(InvoiceRequestBuilder::deriving_payer_id(
+                       self, expanded_key, entropy_source, secp_ctx, payment_id
+               ))
        }
 
        /// Similar to [`Offer::request_invoice_deriving_payer_id`] except uses `payer_id` for the
@@ -489,7 +495,8 @@ impl Offer {
        ///
        /// [`InvoiceRequest::payer_id`]: crate::offers::invoice_request::InvoiceRequest::payer_id
        pub fn request_invoice_deriving_metadata<ES: Deref>(
-               &self, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES
+               &self, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
+               payment_id: PaymentId
        ) -> Result<InvoiceRequestBuilder<ExplicitPayerId, secp256k1::SignOnly>, Bolt12SemanticError>
        where
                ES::Target: EntropySource,
@@ -498,7 +505,9 @@ impl Offer {
                        return Err(Bolt12SemanticError::UnknownRequiredFeatures);
                }
 
-               Ok(InvoiceRequestBuilder::deriving_metadata(self, payer_id, expanded_key, entropy_source))
+               Ok(InvoiceRequestBuilder::deriving_metadata(
+                       self, payer_id, expanded_key, entropy_source, payment_id
+               ))
        }
 
        /// Creates an [`InvoiceRequestBuilder`] for the offer with the given `metadata` and `payer_id`,
@@ -661,11 +670,13 @@ impl OfferContents {
                                let tlv_stream = TlvStream::new(bytes).range(OFFER_TYPES).filter(|record| {
                                        match record.r#type {
                                                OFFER_METADATA_TYPE => false,
-                                               OFFER_NODE_ID_TYPE => !self.metadata.as_ref().unwrap().derives_keys(),
+                                               OFFER_NODE_ID_TYPE => {
+                                                       !self.metadata.as_ref().unwrap().derives_recipient_keys()
+                                               },
                                                _ => true,
                                        }
                                });
-                               signer::verify_metadata(
+                               signer::verify_recipient_metadata(
                                        metadata, key, IV_BYTES, self.signing_pubkey(), tlv_stream, secp_ctx
                                )
                        },
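A hedged sketch of the updated request path (values such as `recipient_pubkey()`, `expanded_key`, `entropy` and `secp_ctx` are assumptions mirroring this PR's tests): the caller now supplies a `PaymentId`, which ends up encrypted in the payer metadata.

    let payment_id = PaymentId([1; 32]);
    let offer = OfferBuilder::new("foo".into(), recipient_pubkey())
        .amount_msats(1000)
        .build().unwrap();
    let invoice_request = offer
        .request_invoice_deriving_payer_id(&expanded_key, &entropy, &secp_ctx, payment_id)
        .unwrap()
        .build_and_sign()
        .unwrap();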
index d419e8fe0d2b41e06c8b44f5f6215d8d07a221a9..4b4572b4df9c85fd8970e9758b727d9b6214a067 100644 (file)
@@ -82,6 +82,7 @@ use crate::sign::EntropySource;
 use crate::io;
 use crate::blinded_path::BlindedPath;
 use crate::ln::PaymentHash;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::InvoiceRequestFeatures;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -147,18 +148,22 @@ impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
        /// Also, sets the metadata when [`RefundBuilder::build`] is called such that it can be used to
        /// verify that an [`InvoiceRequest`] was produced for the refund given an [`ExpandedKey`].
        ///
+       /// The `payment_id` is encrypted in the metadata and should be unique. This ensures that only
+       /// one invoice will be paid for the refund and that payments can be uniquely identified.
+       ///
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        /// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
        pub fn deriving_payer_id<ES: Deref>(
                description: String, node_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
-               secp_ctx: &'a Secp256k1<T>, amount_msats: u64
+               secp_ctx: &'a Secp256k1<T>, amount_msats: u64, payment_id: PaymentId
        ) -> Result<Self, Bolt12SemanticError> where ES::Target: EntropySource {
                if amount_msats > MAX_VALUE_MSAT {
                        return Err(Bolt12SemanticError::InvalidAmount);
                }
 
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let payment_id = Some(payment_id);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, payment_id);
                let metadata = Metadata::DerivedSigningPubkey(derivation_material);
                Ok(Self {
                        refund: RefundContents {
@@ -244,7 +249,7 @@ impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
 
                        let mut tlv_stream = self.refund.as_tlv_stream();
                        tlv_stream.0.metadata = None;
-                       if metadata.derives_keys() {
+                       if metadata.derives_payer_keys() {
                                tlv_stream.2.payer_id = None;
                        }
 
@@ -566,7 +571,7 @@ impl RefundContents {
        }
 
        pub(super) fn derives_keys(&self) -> bool {
-               self.payer.0.derives_keys()
+               self.payer.0.derives_payer_keys()
        }
 
        pub(super) fn as_tlv_stream(&self) -> RefundTlvStreamRef {
@@ -748,6 +753,7 @@ mod tests {
        use core::time::Duration;
        use crate::blinded_path::{BlindedHop, BlindedPath};
        use crate::sign::KeyMaterial;
+       use crate::ln::channelmanager::PaymentId;
        use crate::ln::features::{InvoiceRequestFeatures, OfferFeatures};
        use crate::ln::inbound_payment::ExpandedKey;
        use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -841,9 +847,10 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let refund = RefundBuilder
-                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000)
+                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000, payment_id)
                        .unwrap()
                        .build().unwrap();
                assert_eq!(refund.payer_id(), node_id);
@@ -854,7 +861,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                let mut tlv_stream = refund.as_tlv_stream();
                tlv_stream.2.amount = Some(2000);
@@ -867,7 +877,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered metadata
                let mut tlv_stream = refund.as_tlv_stream();
@@ -882,7 +892,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
@@ -892,6 +902,7 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let blinded_path = BlindedPath {
                        introduction_node_id: pubkey(40),
@@ -903,7 +914,7 @@ mod tests {
                };
 
                let refund = RefundBuilder
-                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000)
+                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000, payment_id)
                        .unwrap()
                        .path(blinded_path)
                        .build().unwrap();
@@ -914,7 +925,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                // Fails verification with altered fields
                let mut tlv_stream = refund.as_tlv_stream();
@@ -928,7 +942,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered payer_id
                let mut tlv_stream = refund.as_tlv_stream();
@@ -943,7 +957,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
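For refunds the shape is analogous; a sketch assuming `node_id`, `expanded_key`, `entropy` and `secp_ctx` are in scope as in the tests above:

    let payment_id = PaymentId([1; 32]);
    let refund = RefundBuilder::deriving_payer_id(
        "refund".to_string(), node_id, &expanded_key, &entropy, &secp_ctx, 1000, payment_id,
    )
    .unwrap()
    .build().unwrap();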
index 8d5f98e6f6b050993474bbedbcc9a0f25c409980..4d5d4662bd62b806cb78543e41653c266a02146a 100644 (file)
@@ -16,15 +16,26 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey, self};
 use core::convert::TryFrom;
 use core::fmt;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::offers::merkle::TlvRecord;
 use crate::util::ser::Writeable;
 
 use crate::prelude::*;
 
+// Use a different HMAC input for each derivation. Otherwise, an attacker could:
+// - take an Offer whose metadata consists of a nonce and an HMAC,
+// - strip off the HMAC and swap in a signing_pubkey whose private key is that HMAC,
+// - generate and sign an invoice using the new signing_pubkey, and
+// - claim they paid it since they would know the preimage of the invoice's payment_hash
 const DERIVED_METADATA_HMAC_INPUT: &[u8; 16] = &[1; 16];
 const DERIVED_METADATA_AND_KEYS_HMAC_INPUT: &[u8; 16] = &[2; 16];
 
+// Additional HMAC inputs to distinguish use cases, either Offer or Refund/InvoiceRequest, where
+// the metadata for the latter contains an encrypted PaymentId.
+const WITHOUT_ENCRYPTED_PAYMENT_ID_HMAC_INPUT: &[u8; 16] = &[3; 16];
+const WITH_ENCRYPTED_PAYMENT_ID_HMAC_INPUT: &[u8; 16] = &[4; 16];
+
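A minimal, self-contained sketch of the domain-separation idea behind these constants (the key and data below are placeholders, not LDK values): feeding a different 16-byte context into the HMAC makes a tag computed for one use case useless in another.

    use bitcoin::hashes::{sha256::Hash as Sha256, Hmac, HmacEngine, HashEngine};

    fn tag(context: &[u8; 16], data: &[u8]) -> Hmac<Sha256> {
        let mut engine = HmacEngine::<Sha256>::new(b"placeholder key material");
        engine.input(context);
        engine.input(data);
        Hmac::from_engine(engine)
    }

    // The same data under the "with payment id" and "without payment id" contexts
    // yields unrelated tags.
    assert_ne!(tag(&[4; 16], b"tlv records"), tag(&[3; 16], b"tlv records"));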
 /// Message metadata which possibly is derived from [`MetadataMaterial`] such that it can be
 /// verified.
 #[derive(Clone)]
@@ -56,7 +67,20 @@ impl Metadata {
                }
        }
 
-       pub fn derives_keys(&self) -> bool {
+       pub fn derives_payer_keys(&self) -> bool {
+               match self {
+                       // Infer whether Metadata::derived_from was called on Metadata::DerivedSigningPubkey to
+                       // produce Metadata::Bytes. This is merely to determine which fields should be included
+                       // when verifying a message. It doesn't necessarily indicate that keys were in fact
+                       // derived, as wouldn't be the case if a Metadata::Bytes with length PaymentId::LENGTH +
+                       // Nonce::LENGTH had been set explicitly.
+                       Metadata::Bytes(bytes) => bytes.len() == PaymentId::LENGTH + Nonce::LENGTH,
+                       Metadata::Derived(_) => false,
+                       Metadata::DerivedSigningPubkey(_) => true,
+               }
+       }
+
+       pub fn derives_recipient_keys(&self) -> bool {
                match self {
                        // Infer whether Metadata::derived_from was called on Metadata::DerivedSigningPubkey to
                        // produce Metadata::Bytes. This is merely to determine which fields should be included
@@ -132,20 +156,33 @@ impl PartialEq for Metadata {
 pub(super) struct MetadataMaterial {
        nonce: Nonce,
        hmac: HmacEngine<Sha256>,
+       // Some for payer metadata and None for offer metadata
+       encrypted_payment_id: Option<[u8; PaymentId::LENGTH]>,
 }
 
 impl MetadataMaterial {
-       pub fn new(nonce: Nonce, expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN]) -> Self {
+       pub fn new(
+               nonce: Nonce, expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
+               payment_id: Option<PaymentId>
+       ) -> Self {
+               // Encrypt payment_id
+               let encrypted_payment_id = payment_id.map(|payment_id| {
+                       expanded_key.crypt_for_offer(payment_id.0, nonce)
+               });
+
                Self {
                        nonce,
                        hmac: expanded_key.hmac_for_offer(nonce, iv_bytes),
+                       encrypted_payment_id,
                }
        }
 
        fn derive_metadata(mut self) -> Vec<u8> {
                self.hmac.input(DERIVED_METADATA_HMAC_INPUT);
+               self.maybe_include_encrypted_payment_id();
 
-               let mut bytes = self.nonce.as_slice().to_vec();
+               let mut bytes = self.encrypted_payment_id.map(|id| id.to_vec()).unwrap_or(vec![]);
+               bytes.extend_from_slice(self.nonce.as_slice());
                bytes.extend_from_slice(&Hmac::from_engine(self.hmac).into_inner());
                bytes
        }
@@ -154,11 +191,26 @@ impl MetadataMaterial {
                mut self, secp_ctx: &Secp256k1<T>
        ) -> (Vec<u8>, KeyPair) {
                self.hmac.input(DERIVED_METADATA_AND_KEYS_HMAC_INPUT);
+               self.maybe_include_encrypted_payment_id();
+
+               let mut bytes = self.encrypted_payment_id.map(|id| id.to_vec()).unwrap_or(vec![]);
+               bytes.extend_from_slice(self.nonce.as_slice());
 
                let hmac = Hmac::from_engine(self.hmac);
                let privkey = SecretKey::from_slice(hmac.as_inner()).unwrap();
                let keys = KeyPair::from_secret_key(secp_ctx, &privkey);
-               (self.nonce.as_slice().to_vec(), keys)
+
+               (bytes, keys)
+       }
+
+       fn maybe_include_encrypted_payment_id(&mut self) {
+               match self.encrypted_payment_id {
+                       None => self.hmac.input(WITHOUT_ENCRYPTED_PAYMENT_ID_HMAC_INPUT),
+                       Some(encrypted_payment_id) => {
+                               self.hmac.input(WITH_ENCRYPTED_PAYMENT_ID_HMAC_INPUT);
+                               self.hmac.input(&encrypted_payment_id)
+                       },
+               }
        }
 }
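To make the byte layout produced above concrete, here is a small sketch (the 32-byte PaymentId and 16-byte Nonce sizes come from the docs in this PR; the helper itself is illustrative, not LDK API): payer metadata is the encrypted PaymentId, then the nonce, then, unless signing keys are derived from it, the HMAC.

    const PAYMENT_ID_LEN: usize = 32; // PaymentId::LENGTH
    const NONCE_LEN: usize = 16;      // Nonce::LENGTH

    /// Splits payer metadata into (encrypted_payment_id, nonce, trailing_hmac_bytes).
    /// The trailing slice is empty when the signing keys were derived from the metadata.
    fn split_payer_metadata(metadata: &[u8]) -> Option<(&[u8], &[u8], &[u8])> {
        if metadata.len() < PAYMENT_ID_LEN + NONCE_LEN {
            return None;
        }
        let (encrypted_payment_id, rest) = metadata.split_at(PAYMENT_ID_LEN);
        let (nonce, hmac) = rest.split_at(NONCE_LEN);
        Some((encrypted_payment_id, nonce, hmac))
    }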
 
@@ -170,19 +222,65 @@ pub(super) fn derive_keys(nonce: Nonce, expanded_key: &ExpandedKey) -> KeyPair {
        KeyPair::from_secret_key(&secp_ctx, &privkey)
 }
 
+/// Verifies data given in a TLV stream was used to produce the given metadata, consisting of:
+/// - a 256-bit [`PaymentId`],
+/// - a 128-bit [`Nonce`], and possibly
+/// - a [`Sha256`] hash of the nonce and the TLV records using the [`ExpandedKey`].
+///
+/// If the latter is not included in the metadata, the TLV stream is used to check if the given
+/// `signing_pubkey` can be derived from it.
+///
+/// Returns the [`PaymentId`] that should be used for sending the payment.
+pub(super) fn verify_payer_metadata<'a, T: secp256k1::Signing>(
+       metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
+       signing_pubkey: PublicKey, tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>,
+       secp_ctx: &Secp256k1<T>
+) -> Result<PaymentId, ()> {
+       if metadata.len() < PaymentId::LENGTH {
+               return Err(());
+       }
+
+       let mut encrypted_payment_id = [0u8; PaymentId::LENGTH];
+       encrypted_payment_id.copy_from_slice(&metadata[..PaymentId::LENGTH]);
+
+       let mut hmac = hmac_for_message(
+               &metadata[PaymentId::LENGTH..], expanded_key, iv_bytes, tlv_stream
+       )?;
+       hmac.input(WITH_ENCRYPTED_PAYMENT_ID_HMAC_INPUT);
+       hmac.input(&encrypted_payment_id);
+
+       verify_metadata(
+               &metadata[PaymentId::LENGTH..], Hmac::from_engine(hmac), signing_pubkey, secp_ctx
+       )?;
+
+       let nonce = Nonce::try_from(&metadata[PaymentId::LENGTH..][..Nonce::LENGTH]).unwrap();
+       let payment_id = expanded_key.crypt_for_offer(encrypted_payment_id, nonce);
+
+       Ok(PaymentId(payment_id))
+}
+
 /// Verifies data given in a TLV stream was used to produce the given metadata, consisting of:
 /// - a 128-bit [`Nonce`] and possibly
 /// - a [`Sha256`] hash of the nonce and the TLV records using the [`ExpandedKey`].
 ///
 /// If the latter is not included in the metadata, the TLV stream is used to check if the given
 /// `signing_pubkey` can be derived from it.
-pub(super) fn verify_metadata<'a, T: secp256k1::Signing>(
+///
+/// Returns the [`KeyPair`] for signing the invoice, if it can be derived from the metadata.
+pub(super) fn verify_recipient_metadata<'a, T: secp256k1::Signing>(
        metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
        signing_pubkey: PublicKey, tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>,
        secp_ctx: &Secp256k1<T>
 ) -> Result<Option<KeyPair>, ()> {
-       let hmac = hmac_for_message(metadata, expanded_key, iv_bytes, tlv_stream)?;
+       let mut hmac = hmac_for_message(metadata, expanded_key, iv_bytes, tlv_stream)?;
+       hmac.input(WITHOUT_ENCRYPTED_PAYMENT_ID_HMAC_INPUT);
+
+       verify_metadata(metadata, Hmac::from_engine(hmac), signing_pubkey, secp_ctx)
+}
 
+fn verify_metadata<T: secp256k1::Signing>(
+       metadata: &[u8], hmac: Hmac<Sha256>, signing_pubkey: PublicKey, secp_ctx: &Secp256k1<T>
+) -> Result<Option<KeyPair>, ()> {
        if metadata.len() == Nonce::LENGTH {
                let derived_keys = KeyPair::from_secret_key(
                        secp_ctx, &SecretKey::from_slice(hmac.as_inner()).unwrap()
@@ -206,7 +304,7 @@ pub(super) fn verify_metadata<'a, T: secp256k1::Signing>(
 fn hmac_for_message<'a>(
        metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
        tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>
-) -> Result<Hmac<Sha256>, ()> {
+) -> Result<HmacEngine<Sha256>, ()> {
        if metadata.len() < Nonce::LENGTH {
                return Err(());
        }
@@ -227,5 +325,5 @@ fn hmac_for_message<'a>(
                hmac.input(DERIVED_METADATA_HMAC_INPUT);
        }
 
-       Ok(Hmac::from_engine(hmac))
+       Ok(hmac)
 }
index f1b3c79edc0ec15cd5f68a406c5f6a8e7f7c3f91..39122472eacb5513640f7fd78020a10ea783957d 100644 (file)
@@ -20,33 +20,33 @@ use crate::ln::features::BlindedHopFeatures;
 use crate::offers::invoice::BlindedPayInfo;
 use crate::offers::merkle::TaggedHash;
 
-pub(super) fn payer_keys() -> KeyPair {
+pub(crate) fn payer_keys() -> KeyPair {
        let secp_ctx = Secp256k1::new();
        KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap())
 }
 
-pub(super) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
        let secp_ctx = Secp256k1::new();
        let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
        Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
 }
 
-pub(super) fn payer_pubkey() -> PublicKey {
+pub(crate) fn payer_pubkey() -> PublicKey {
        payer_keys().public_key()
 }
 
-pub(super) fn recipient_keys() -> KeyPair {
+pub(crate) fn recipient_keys() -> KeyPair {
        let secp_ctx = Secp256k1::new();
        KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap())
 }
 
-pub(super) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
        let secp_ctx = Secp256k1::new();
        let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
        Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
 }
 
-pub(super) fn recipient_pubkey() -> PublicKey {
+pub(crate) fn recipient_pubkey() -> PublicKey {
        recipient_keys().public_key()
 }
 
@@ -59,7 +59,7 @@ pub(super) fn privkey(byte: u8) -> SecretKey {
        SecretKey::from_slice(&[byte; 32]).unwrap()
 }
 
-pub(super) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
+pub(crate) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
        let paths = vec![
                BlindedPath {
                        introduction_node_id: pubkey(40),
@@ -101,17 +101,17 @@ pub(super) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
        payinfo.into_iter().zip(paths.into_iter()).collect()
 }
 
-pub(super) fn payment_hash() -> PaymentHash {
+pub(crate) fn payment_hash() -> PaymentHash {
        PaymentHash([42; 32])
 }
 
-pub(super) fn now() -> Duration {
+pub(crate) fn now() -> Duration {
        std::time::SystemTime::now()
                .duration_since(std::time::SystemTime::UNIX_EPOCH)
                .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH")
 }
 
-pub(super) struct FixedEntropy;
+pub(crate) struct FixedEntropy;
 
 impl EntropySource for FixedEntropy {
        fn get_secure_random_bytes(&self) -> [u8; 32] {
index bb5531242188ba28028952b47aaeb3899a6e722c..ef776a44dc11c484526f7f5baeb64db06498710b 100644 (file)
@@ -23,8 +23,9 @@ use bitcoin::network::constants::Network;
 use bitcoin::blockdata::constants::genesis_block;
 
 use crate::events::{MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::ChannelId;
 use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
-use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
+use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT};
 use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
 use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use crate::ln::msgs;
@@ -382,7 +383,7 @@ macro_rules! secp_verify_sig {
                                        err: format!("Invalid signature on {} message", $msg_type),
                                        action: ErrorAction::SendWarningMessage {
                                                msg: msgs::WarningMessage {
-                                                       channel_id: [0; 32],
+                                                       channel_id: ChannelId::new_zero(),
                                                        data: format!("Invalid signature on {} message", $msg_type),
                                                },
                                                log_level: Level::Trace,
@@ -400,7 +401,7 @@ macro_rules! get_pubkey_from_node_id {
                                err: format!("Invalid public key on {} message", $msg_type),
                                action: ErrorAction::SendWarningMessage {
                                        msg: msgs::WarningMessage {
-                                               channel_id: [0; 32],
+                                               channel_id: ChannelId::new_zero(),
                                                data: format!("Invalid public key on {} message", $msg_type),
                                        },
                                        log_level: Level::Trace
@@ -1127,7 +1128,7 @@ pub struct NodeAnnouncementInfo {
 
 impl NodeAnnouncementInfo {
        /// Internet-level addresses via which one can connect to the node
-       pub fn addresses(&self) -> &[NetAddress] {
+       pub fn addresses(&self) -> &[SocketAddress] {
                self.announcement_message.as_ref()
                        .map(|msg| msg.contents.addresses.as_slice())
                        .unwrap_or_default()
@@ -1136,7 +1137,7 @@ impl NodeAnnouncementInfo {
 
 impl Writeable for NodeAnnouncementInfo {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               let empty_addresses = Vec::<NetAddress>::new();
+               let empty_addresses = Vec::<SocketAddress>::new();
                write_tlv_fields!(writer, {
                        (0, self.features, required),
                        (2, self.last_update, required),
@@ -1159,7 +1160,7 @@ impl Readable for NodeAnnouncementInfo {
                        (8, announcement_message, option),
                        (10, _addresses, optional_vec), // deprecated, not used anymore
                });
-               let _: Option<Vec<NetAddress>> = _addresses;
+               let _: Option<Vec<SocketAddress>> = _addresses;
                Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(),
                        alias: alias.0.unwrap(), announcement_message })
        }
@@ -1235,7 +1236,7 @@ impl Writeable for NodeInfo {
 }
 
 // A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
-// necessary to maintain compatibility with previous serializations of `NetAddress` that have an
+// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an
 // invalid hostname set. We ignore and eat all errors until we are either able to read a
 // `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
 struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
@@ -2038,7 +2039,7 @@ impl ReadOnlyNetworkGraph<'_> {
        /// Get network addresses by node id.
        /// Returns None if the requested node is completely unknown,
        /// or if node announcement for the node was never received.
-       pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<NetAddress>> {
+       pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<SocketAddress>> {
                self.nodes.get(&NodeId::from_pubkey(&pubkey))
                        .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
        }
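A short sketch of the renamed lookup (imports and graph construction elided; the `Deref`/`Logger` bounds match those used elsewhere in this file):

    fn announced_addresses<L: Deref>(
        graph: &NetworkGraph<L>, node: &PublicKey
    ) -> Option<Vec<SocketAddress>> where L::Target: Logger {
        graph.read_only().get_addresses(node)
    }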
index 1f143ed938dcf8435334c3c646057cee7163e200..9c5fe8e1f9bd3dab59033644591a4a59b49d173b 100644 (file)
@@ -20,22 +20,22 @@ use crate::ln::features::{Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelF
 use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
 use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
-use crate::routing::scoring::{ChannelUsage, LockableScore, Score};
+use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
 use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
 use crate::util::logger::{Level, Logger};
 use crate::util::chacha20::ChaCha20;
 
 use crate::io;
 use crate::prelude::*;
-use crate::sync::{Mutex};
+use crate::sync::Mutex;
 use alloc::collections::BinaryHeap;
 use core::{cmp, fmt};
-use core::ops::{Deref, DerefMut};
+use core::ops::Deref;
 
 /// A [`Router`] implemented using [`find_route`].
-pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> where
+pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
        L::Target: Logger,
-       S::Target: for <'a> LockableScore<'a, Score = Sc>,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        network_graph: G,
        logger: L,
@@ -44,9 +44,9 @@ pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref,
        score_params: SP
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
        L::Target: Logger,
-       S::Target: for <'a> LockableScore<'a, Score = Sc>,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        /// Creates a new router.
        pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S, score_params: SP) -> Self {
@@ -55,9 +55,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Scor
        }
 }
 
-impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
+impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
        L::Target: Logger,
-       S::Target: for <'a> LockableScore<'a, Score = Sc>,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        fn find_route(
                &self,
@@ -73,7 +73,7 @@ impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Sco
                };
                find_route(
                        payer, params, &self.network_graph, first_hops, &*self.logger,
-                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.lock().deref_mut(), &inflight_htlcs),
+                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.read_lock(), &inflight_htlcs),
                        &self.score_params,
                        &random_seed_bytes
                )
@@ -106,21 +106,20 @@ pub trait Router {
        }
 }
 
-/// [`Score`] implementation that factors in in-flight HTLC liquidity.
+/// [`ScoreLookUp`] implementation that factors in in-flight HTLC liquidity.
 ///
-/// Useful for custom [`Router`] implementations to wrap their [`Score`] on-the-fly when calling
+/// Useful for custom [`Router`] implementations to wrap their [`ScoreLookUp`] on-the-fly when calling
 /// [`find_route`].
 ///
-/// [`Score`]: crate::routing::scoring::Score
-pub struct ScorerAccountingForInFlightHtlcs<'a, S: Score<ScoreParams = SP>, SP: Sized> {
-       scorer: &'a mut S,
+/// [`ScoreLookUp`]: crate::routing::scoring::ScoreLookUp
+pub struct ScorerAccountingForInFlightHtlcs<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> {
+       scorer: S,
        // Maps a channel's short channel id and its direction to the liquidity used up.
        inflight_htlcs: &'a InFlightHtlcs,
 }
-
-impl<'a, S: Score<ScoreParams = SP>, SP: Sized> ScorerAccountingForInFlightHtlcs<'a, S, SP> {
+impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
        /// Initialize a new `ScorerAccountingForInFlightHtlcs`.
-       pub fn new(scorer: &'a mut S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
+       pub fn new(scorer: S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
                ScorerAccountingForInFlightHtlcs {
                        scorer,
                        inflight_htlcs
@@ -129,12 +128,12 @@ impl<'a, S: Score<ScoreParams = SP>, SP: Sized> ScorerAccountingForInFlightHtlcs
 }
 
 #[cfg(c_bindings)]
-impl<'a, S: Score<ScoreParams = SP>, SP: Sized> Writeable for ScorerAccountingForInFlightHtlcs<'a, S, SP> {
+impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> Writeable for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) }
 }
 
-impl<'a, S: Score<ScoreParams = SP>, SP: Sized> Score for ScorerAccountingForInFlightHtlcs<'a, S, SP>  {
-       type ScoreParams = S::ScoreParams;
+impl<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
+       type ScoreParams = Sc::ScoreParams;
        fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
                if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
                        source, target, short_channel_id
@@ -149,22 +148,6 @@ impl<'a, S: Score<ScoreParams = SP>, SP: Sized> Score for ScorerAccountingForInF
                        self.scorer.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
                }
        }
-
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.scorer.payment_path_failed(path, short_channel_id)
-       }
-
-       fn payment_path_successful(&mut self, path: &Path) {
-               self.scorer.payment_path_successful(path)
-       }
-
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.scorer.probe_failed(path, short_channel_id)
-       }
-
-       fn probe_successful(&mut self, path: &Path) {
-               self.scorer.probe_successful(path)
-       }
 }
 
 /// A data structure for tracking in-flight HTLCs. May be used during pathfinding to account for
@@ -354,23 +337,34 @@ pub struct Route {
        /// [`BlindedTail`]s are present, then the pubkey of the last [`RouteHop`] in each path must be
        /// the same.
        pub paths: Vec<Path>,
-       /// The `payment_params` parameter passed via [`RouteParameters`] to [`find_route`].
+       /// The `route_params` parameter passed to [`find_route`].
        ///
        /// This is used by `ChannelManager` to track information which may be required for retries.
-       pub payment_params: Option<PaymentParameters>,
+       ///
+       /// Will be `None` for objects serialized with LDK versions prior to 0.0.117.
+       pub route_params: Option<RouteParameters>,
 }
 
 impl Route {
        /// Returns the total amount of fees paid on this [`Route`].
        ///
-       /// This doesn't include any extra payment made to the recipient, which can happen in excess of
-       /// the amount passed to [`find_route`]'s `route_params.final_value_msat`.
+       /// For objects serialized with LDK 0.0.117 and after, this includes any extra payment made to
+       /// the recipient, which can happen in excess of the amount passed to [`find_route`] via
+       /// [`RouteParameters::final_value_msat`], if we had to reach the [`htlc_minimum_msat`] limits.
+       ///
+       /// [`htlc_minimum_msat`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#the-channel_update-message
        pub fn get_total_fees(&self) -> u64 {
-               self.paths.iter().map(|path| path.fee_msat()).sum()
+               let overpaid_value_msat = self.route_params.as_ref()
+                       .map_or(0, |p| self.get_total_amount().saturating_sub(p.final_value_msat));
+               overpaid_value_msat + self.paths.iter().map(|path| path.fee_msat()).sum::<u64>()
        }
 
-       /// Returns the total amount paid on this [`Route`], excluding the fees. Might be more than
-       /// requested if we had to reach htlc_minimum_msat.
+       /// Returns the total amount paid on this [`Route`], excluding the fees.
+       ///
+       /// Might be more than requested as part of the given [`RouteParameters::final_value_msat`] if
+       /// we had to reach the [`htlc_minimum_msat`] limits.
+       ///
+       /// [`htlc_minimum_msat`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#the-channel_update-message
        pub fn get_total_amount(&self) -> u64 {
                self.paths.iter().map(|path| path.final_value_msat()).sum()
        }
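A worked example of the new accounting (numbers are made up for illustration): if the stored `RouteParameters::final_value_msat` was 100_000 msat but `htlc_minimum_msat` limits forced the route to deliver 101_000 msat with 250 msat of hop fees, `get_total_fees` now reports the 1_000 msat overpayment as part of the fees.

    let final_value_msat: u64 = 100_000;
    let delivered_msat: u64 = 101_000;   // sum of path.final_value_msat()
    let hop_fees_msat: u64 = 250;        // sum of path.fee_msat()
    let overpaid_msat = delivered_msat.saturating_sub(final_value_msat);
    assert_eq!(overpaid_msat + hop_fees_msat, 1_250);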
@@ -400,8 +394,11 @@ impl Writeable for Route {
                        }
                }
                write_tlv_fields!(writer, {
-                       (1, self.payment_params, option),
+                       // For compatibility with LDK versions prior to 0.0.117, we take the individual
+                       // RouteParameters' fields and reconstruct them on read.
+                       (1, self.route_params.as_ref().map(|p| &p.payment_params), option),
                        (2, blinded_tails, optional_vec),
+                       (3, self.route_params.as_ref().map(|p| p.final_value_msat), option),
                });
                Ok(())
        }
@@ -428,6 +425,7 @@ impl Readable for Route {
                _init_and_read_len_prefixed_tlv_fields!(reader, {
                        (1, payment_params, (option: ReadableArgs, min_final_cltv_expiry_delta)),
                        (2, blinded_tails, optional_vec),
+                       (3, final_value_msat, option),
                });
                let blinded_tails = blinded_tails.unwrap_or(Vec::new());
                if blinded_tails.len() != 0 {
@@ -436,14 +434,23 @@ impl Readable for Route {
                                path.blinded_tail = blinded_tail_opt;
                        }
                }
-               Ok(Route { paths, payment_params })
+
+               // If we previously wrote the corresponding fields, reconstruct RouteParameters.
+               let route_params = match (payment_params, final_value_msat) {
+                       (Some(payment_params), Some(final_value_msat)) => {
+                               Some(RouteParameters { payment_params, final_value_msat })
+                       }
+                       _ => None,
+               };
+
+               Ok(Route { paths, route_params })
        }
 }
 
 /// Parameters needed to find a [`Route`].
 ///
 /// Passed to [`find_route`] and [`build_route_from_hops`].
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub struct RouteParameters {
        /// The parameters of the failed payment path.
        pub payment_params: PaymentParameters,
@@ -452,6 +459,13 @@ pub struct RouteParameters {
        pub final_value_msat: u64,
 }
 
+impl RouteParameters {
+       /// Constructs [`RouteParameters`] from the given [`PaymentParameters`] and a payment amount.
+       pub fn from_payment_params_and_value(payment_params: PaymentParameters, final_value_msat: u64) -> Self {
+               Self { payment_params, final_value_msat }
+       }
+}
+
 impl Writeable for RouteParameters {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_tlv_fields!(writer, {
@@ -1410,26 +1424,28 @@ fn sort_first_hop_channels(
 /// [`ChannelManager::list_usable_channels`]: crate::ln::channelmanager::ChannelManager::list_usable_channels
 /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
 /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
-pub fn find_route<L: Deref, GL: Deref, S: Score>(
+pub fn find_route<L: Deref, GL: Deref, S: ScoreLookUp>(
        our_node_pubkey: &PublicKey, route_params: &RouteParameters,
        network_graph: &NetworkGraph<GL>, first_hops: Option<&[&ChannelDetails]>, logger: L,
        scorer: &S, score_params: &S::ScoreParams, random_seed_bytes: &[u8; 32]
 ) -> Result<Route, LightningError>
 where L::Target: Logger, GL::Target: Logger {
        let graph_lock = network_graph.read_only();
-       let mut route = get_route(our_node_pubkey, &route_params.payment_params, &graph_lock, first_hops,
-               route_params.final_value_msat, logger, scorer, score_params,
-               random_seed_bytes)?;
+       let mut route = get_route(our_node_pubkey, &route_params, &graph_lock, first_hops, logger,
+               scorer, score_params, random_seed_bytes)?;
        add_random_cltv_offset(&mut route, &route_params.payment_params, &graph_lock, random_seed_bytes);
        Ok(route)
 }
 
-pub(crate) fn get_route<L: Deref, S: Score>(
-       our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
-       first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, logger: L, scorer: &S, score_params: &S::ScoreParams,
+pub(crate) fn get_route<L: Deref, S: ScoreLookUp>(
+       our_node_pubkey: &PublicKey, route_params: &RouteParameters, network_graph: &ReadOnlyNetworkGraph,
+       first_hops: Option<&[&ChannelDetails]>, logger: L, scorer: &S, score_params: &S::ScoreParams,
        _random_seed_bytes: &[u8; 32]
 ) -> Result<Route, LightningError>
 where L::Target: Logger {
+
+       let payment_params = &route_params.payment_params;
+       let final_value_msat = route_params.final_value_msat;
        // If we're routing to a blinded recipient, we won't have their node id. Therefore, keep the
        // unblinded payee id as an option. We also need a non-optional "payee id" for path construction,
        // so use a dummy id for this in the blinded case.
@@ -2497,7 +2513,7 @@ where L::Target: Logger {
                }
        }
 
-       let route = Route { paths, payment_params: Some(payment_params.clone()) };
+       let route = Route { paths, route_params: Some(route_params.clone()) };
        log_info!(logger, "Got route: {}", log_route!(route));
        Ok(route)
 }
@@ -2602,17 +2618,15 @@ pub fn build_route_from_hops<L: Deref, GL: Deref>(
 ) -> Result<Route, LightningError>
 where L::Target: Logger, GL::Target: Logger {
        let graph_lock = network_graph.read_only();
-       let mut route = build_route_from_hops_internal(
-               our_node_pubkey, hops, &route_params.payment_params, &graph_lock,
-               route_params.final_value_msat, logger, random_seed_bytes)?;
+       let mut route = build_route_from_hops_internal(our_node_pubkey, hops, &route_params,
+               &graph_lock, logger, random_seed_bytes)?;
        add_random_cltv_offset(&mut route, &route_params.payment_params, &graph_lock, random_seed_bytes);
        Ok(route)
 }
 
 fn build_route_from_hops_internal<L: Deref>(
-       our_node_pubkey: &PublicKey, hops: &[PublicKey], payment_params: &PaymentParameters,
-       network_graph: &ReadOnlyNetworkGraph, final_value_msat: u64, logger: L,
-       random_seed_bytes: &[u8; 32]
+       our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters,
+       network_graph: &ReadOnlyNetworkGraph, logger: L, random_seed_bytes: &[u8; 32],
 ) -> Result<Route, LightningError> where L::Target: Logger {
 
        struct HopScorer {
@@ -2620,7 +2634,7 @@ fn build_route_from_hops_internal<L: Deref>(
                hop_ids: [Option<NodeId>; MAX_PATH_LENGTH_ESTIMATE as usize],
        }
 
-       impl Score for HopScorer {
+       impl ScoreLookUp for HopScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(&self, _short_channel_id: u64, source: &NodeId, target: &NodeId,
                        _usage: ChannelUsage, _score_params: &Self::ScoreParams) -> u64
@@ -2638,14 +2652,6 @@ fn build_route_from_hops_internal<L: Deref>(
                        }
                        u64::max_value()
                }
-
-               fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-
-               fn payment_path_successful(&mut self, _path: &Path) {}
-
-               fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-
-               fn probe_successful(&mut self, _path: &Path) {}
        }
 
        impl<'a> Writeable for HopScorer {
@@ -2667,8 +2673,7 @@ fn build_route_from_hops_internal<L: Deref>(
 
        let scorer = HopScorer { our_node_id, hop_ids };
 
-       get_route(our_node_pubkey, payment_params, network_graph, None, final_value_msat,
-               logger, &scorer, &(), random_seed_bytes)
+       get_route(our_node_pubkey, route_params, network_graph, None, logger, &scorer, &(), random_seed_bytes)
 }
 
 #[cfg(test)]
@@ -2678,11 +2683,12 @@ mod tests {
        use crate::routing::utxo::UtxoResult;
        use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
                BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
-               DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
-       use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, Score, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
+               DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE, RouteParameters};
+       use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, ScoreLookUp, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
        use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel};
        use crate::chain::transaction::OutPoint;
        use crate::sign::EntropySource;
+       use crate::ln::ChannelId;
        use crate::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures, ChannelFeatures, InitFeatures, NodeFeatures};
        use crate::ln::msgs::{ErrorAction, LightningError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use crate::ln::channelmanager;
@@ -2715,7 +2721,7 @@ mod tests {
        fn get_channel_details(short_channel_id: Option<u64>, node_id: PublicKey,
                        features: InitFeatures, outbound_capacity_msat: u64) -> channelmanager::ChannelDetails {
                channelmanager::ChannelDetails {
-                       channel_id: [0; 32],
+                       channel_id: ChannelId::new_zero(),
                        counterparty: channelmanager::ChannelCounterparty {
                                features,
                                node_id,
@@ -2760,11 +2766,17 @@ mod tests {
 
                // Simple route to 2 via 1
 
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 0, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Cannot send a payment of 0 msat");
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 0);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Cannot send a payment of 0 msat");
                } else { panic!(); }
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -2795,12 +2807,15 @@ mod tests {
 
                let our_chans = vec![get_channel_details(Some(2), our_id, InitFeatures::from_le_bytes(vec![0b11]), 100000)];
 
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) =
-                       get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()),
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
+                               assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
                } else { panic!(); }
-
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
        }
 
@@ -2908,8 +2923,12 @@ mod tests {
                });
 
                // Not possible to send 199_999_999, because the minimum on channel=2 is 200_000_000.
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 199_999_999);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
 
                // Lift the restriction on the first hop.
@@ -2927,7 +2946,8 @@ mod tests {
                });
 
                // A payment above the minimum should pass
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
        }
 
@@ -3009,7 +3029,10 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 60_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                // Overpay fees to hit htlc_minimum_msat.
                let overpaid_fees = route.paths[0].hops[0].fee_msat + route.paths[1].hops[0].fee_msat;
                // TODO: this could be better balanced to overpay 10k and not 15k.
@@ -3054,14 +3077,17 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                // Fine to overpay for htlc_minimum_msat if it allows us to save on fees.
                assert_eq!(route.paths.len(), 1);
                assert_eq!(route.paths[0].hops[0].short_channel_id, 12);
                let fees = route.paths[0].hops[0].fee_msat;
                assert_eq!(fees, 5_000);
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 50_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                // Not fine to overpay for htlc_minimum_msat if it requires paying more than the fee on
                // the other channel.
                assert_eq!(route.paths.len(), 1);
@@ -3106,13 +3132,19 @@ mod tests {
                });
 
                // If all the channels require some features we don't understand, route should fail
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
 
                // If we specify a channel to node7, that overrides our local channel view and that gets used
-               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
+                       InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
@@ -3147,13 +3179,19 @@ mod tests {
                add_or_update_node(&gossip_sync, &secp_ctx, &privkeys[7], unknown_features.clone(), 1);
 
                // If all nodes require some features we don't understand, route should fail
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
 
                // If we specify a channel to node7, that overrides our local channel view and that gets used
-               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
+                       InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
@@ -3185,7 +3223,9 @@ mod tests {
 
                // Route to 1 via 2 and 3 because our channel to 1 is disabled
                let payment_params = PaymentParameters::from_node_id(nodes[0], 42);
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 3);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3211,8 +3251,12 @@ mod tests {
 
                // If we specify a channel to node7, that overrides our local channel view and that gets used
                let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
-               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
+                       InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
@@ -3334,14 +3378,21 @@ mod tests {
                let mut invalid_last_hops = last_hops_multi_private_channels(&nodes);
                invalid_last_hops.push(invalid_last_hop);
                {
-                       let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(invalid_last_hops).unwrap();
-                       if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Route hint cannot have the payee as the source.");
+                       let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                               .with_route_hints(invalid_last_hops).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+                       if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                               &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                               &random_seed_bytes) {
+                                       assert_eq!(err, "Route hint cannot have the payee as the source.");
                        } else { panic!(); }
                }
 
-               let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops_multi_private_channels(&nodes)).unwrap();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                       .with_route_hints(last_hops_multi_private_channels(&nodes)).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3416,8 +3467,9 @@ mod tests {
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
                // Test handling of an empty RouteHint passed in Invoice.
-
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3523,7 +3575,9 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3595,7 +3649,9 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &[42u8; 32]).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &[42u8; 32]).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3677,7 +3733,9 @@ mod tests {
                // This test shows that public routes can be present in the invoice
                // which would be handled in the same manner.
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3729,8 +3787,12 @@ mod tests {
                // Simple test with outbound channel to 4 to test that last_hops and first_hops connect
                let our_chans = vec![get_channel_details(Some(42), nodes[3].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
                let mut last_hops = last_hops(&nodes);
-               let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops.clone()).unwrap();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                       .with_route_hints(last_hops.clone()).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[3]);
@@ -3750,8 +3812,12 @@ mod tests {
                last_hops[0].0[0].fees.base_msat = 1000;
 
                // Revert to via 6 as the fee on 8 goes up
-               let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops).unwrap();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                       .with_route_hints(last_hops).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3785,7 +3851,9 @@ mod tests {
                assert_eq!(route.paths[0].hops[3].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
 
                // ...but still use 8 for larger payments as 6 has a variable feerate
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 2000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 2000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3850,8 +3918,10 @@ mod tests {
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
                let logger = ln_test_utils::TestLogger::new();
                let network_graph = NetworkGraph::new(Network::Testnet, &logger);
-               let route = get_route(&source_node_id, &payment_params, &network_graph.read_only(),
-                               Some(&our_chans.iter().collect::<Vec<_>>()), route_val, &logger, &scorer, &(), &random_seed_bytes);
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, route_val);
+               let route = get_route(&source_node_id, &route_params, &network_graph.read_only(),
+                               Some(&our_chans.iter().collect::<Vec<_>>()), &logger, &scorer, &(),
+                               &random_seed_bytes);
                route
        }
 
@@ -3972,15 +4042,21 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 250_000_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 250_000_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None,
+                                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 250_000_000, Arc::clone(&logger), &scorer, &(),&random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 250_000_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4008,15 +4084,23 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 200_000_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(),
+                                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
+                                       &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 200_000_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                               Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                               &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4055,15 +4139,21 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4126,15 +4216,21 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                       // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4158,15 +4254,21 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 10_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 10_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 10_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4270,15 +4372,21 @@ mod tests {
                });
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 60_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 49 sats (just a bit below the capacity).
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 49_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 49_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4291,7 +4399,10 @@ mod tests {
 
                {
                        // Attempting to route an exact amount is also fine
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 50_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4339,7 +4450,10 @@ mod tests {
                });
 
                {
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 50_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4504,8 +4618,10 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 300_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                               &our_id, &payment_params, &network_graph.read_only(), None, 300_000,
+                               &our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
                                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
@@ -4514,8 +4630,10 @@ mod tests {
                {
                        // Attempting to route while setting max_path_count to 0 results in a failure.
                        let zero_payment_params = payment_params.clone().with_max_path_count(0);
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               zero_payment_params, 100);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                               &our_id, &zero_payment_params, &network_graph.read_only(), None, 100,
+                               &our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
                                        assert_eq!(err, "Can't find a route with no paths allowed.");
                        } else { panic!(); }
@@ -4526,8 +4644,10 @@ mod tests {
                        // This is the case because the minimal_value_contribution_msat would require each path
                        // to account for 1/3 of the total value, which is violated by 2 out of 3 paths.
                        let fail_payment_params = payment_params.clone().with_max_path_count(3);
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               fail_payment_params, 250_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                               &our_id, &fail_payment_params, &network_graph.read_only(), None, 250_000,
+                               &our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
                                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
@@ -4536,8 +4656,10 @@ mod tests {
                {
                        // Now, attempt to route 250 sats (just a bit below the capacity).
                        // Our algorithm should provide us with these 3 paths.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None,
-                               250_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 250_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4554,8 +4676,10 @@ mod tests {
 
                {
                        // Attempting to route an exact amount is also fine
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None,
-                               290_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 290_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4589,7 +4713,8 @@ mod tests {
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
                let config = UserConfig::default();
-               let payment_params = PaymentParameters::from_node_id(nodes[3], 42).with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[3], 42)
+                       .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
 
                // We need a route consisting of 3 paths:
                // From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}.
@@ -4724,16 +4849,22 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 350_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 350_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 300 sats (exact amount we can route).
                        // Our algorithm should provide us with these 3 paths, 100 sats each.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 300_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 300_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
 
                        let mut total_amount_paid_msat = 0;
@@ -4894,7 +5025,10 @@ mod tests {
                {
                        // Now, attempt to route 180 sats.
                        // Our algorithm should provide us with these 2 paths.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 180_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 180_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
 
                        let mut total_value_transferred_msat = 0;
@@ -5064,15 +5198,20 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 210_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 210_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 200 sats (exact amount we can route).
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 200_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 200_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
 
                        let mut total_amount_paid_msat = 0;
@@ -5172,7 +5311,10 @@ mod tests {
 
                // Get a route for 100 sats and check that we found the MPP route no problem and didn't
                // overpay at all.
-               let mut route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100_000);
+               let mut route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                route.paths.sort_by_key(|path| path.hops[0].short_channel_id);
                // Paths are manually ordered by SCID, so:
@@ -5290,16 +5432,22 @@ mod tests {
 
                {
                        // Attempting to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 150_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 150_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 125 sats (just a bit below the capacity of 3 channels).
                        // Our algorithm should provide us with these 3 paths.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 125_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 125_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -5312,7 +5460,10 @@ mod tests {
 
                {
                        // Attempt to route without the last small cheap channel
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 90_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -5451,7 +5602,10 @@ mod tests {
 
                {
                        // Now ensure the route flows simply over nodes 1 and 4 to 6.
-                       let route = get_route(&our_id, &payment_params, &network.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 10_000);
+                       let route = get_route(&our_id, &route_params, &network.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 3);
 
@@ -5522,7 +5676,10 @@ mod tests {
                {
                        // Now, attempt to route 90 sats, which is exactly 90 sats at the last hop, plus the
                        // 200% fee charged by channel 13 in the 1-to-2 direction.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 90_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 2);
 
@@ -5588,7 +5745,10 @@ mod tests {
                        // Now, attempt to route 90 sats, hitting the htlc_minimum on channel 4, but
                        // overshooting the htlc_maximum on channel 2. Thus, we should pick the (absurdly
                        // expensive) channels 12-13 path.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 90_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 2);
 
@@ -5627,10 +5787,12 @@ mod tests {
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
                {
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 100_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 200_000),
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 10_000),
-                       ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 1);
 
@@ -5639,10 +5801,12 @@ mod tests {
                        assert_eq!(route.paths[0].hops[0].fee_msat, 100_000);
                }
                {
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 100_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 50_000),
-                       ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
                        assert_eq!(route.paths[0].hops.len(), 1);
                        assert_eq!(route.paths[1].hops.len(), 1);
@@ -5665,7 +5829,9 @@ mod tests {
                        // If we have several options above the 3x payment value threshold, we should pick the
                        // smallest of them, avoiding further fragmenting our available outbound balance to
                        // this node.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 100_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(5), nodes[0], channelmanager::provided_init_features(&config), 50_000),
@@ -5674,7 +5840,7 @@ mod tests {
                                &get_channel_details(Some(8), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(9), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(4), nodes[0], channelmanager::provided_init_features(&config), 1_000_000),
-                       ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 1);
 
@@ -5694,10 +5860,10 @@ mod tests {
                let scorer = ln_test_utils::TestScorer::new();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph.read_only(), None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 100);
@@ -5707,10 +5873,10 @@ mod tests {
                // Applying a 100 msat penalty to each hop results in taking channels 7 and 10 to nodes[6]
                // from nodes[2] rather than channel 6, 11, and 8, even though the longer path is cheaper.
                let scorer = FixedPenaltyScorer::with_penalty(100);
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph.read_only(), None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100);
+               let route = get_route( &our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 300);
@@ -5726,16 +5892,11 @@ mod tests {
        impl Writeable for BadChannelScorer {
                fn write<W: Writer>(&self, _w: &mut W) -> Result<(), crate::io::Error> { unimplemented!() }
        }
-       impl Score for BadChannelScorer {
+       impl ScoreLookUp for BadChannelScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(&self, short_channel_id: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
                        if short_channel_id == self.short_channel_id { u64::max_value() } else { 0 }
                }
-
-               fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn payment_path_successful(&mut self, _path: &Path) {}
-               fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn probe_successful(&mut self, _path: &Path) {}
        }
 
        struct BadNodeScorer {
@@ -5747,16 +5908,11 @@ mod tests {
                fn write<W: Writer>(&self, _w: &mut W) -> Result<(), crate::io::Error> { unimplemented!() }
        }
 
-       impl Score for BadNodeScorer {
+       impl ScoreLookUp for BadNodeScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(&self, _: u64, _: &NodeId, target: &NodeId, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
                        if *target == self.node_id { u64::max_value() } else { 0 }
                }
-
-               fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn payment_path_successful(&mut self, _path: &Path) {}
-               fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn probe_successful(&mut self, _path: &Path) {}
        }
 
        #[test]
@@ -5770,10 +5926,10 @@ mod tests {
                let scorer = ln_test_utils::TestScorer::new();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100);
+               let route = get_route( &our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 100);
@@ -5782,10 +5938,8 @@ mod tests {
 
                // A different path to nodes[6] exists if channel 6 cannot be routed over.
                let scorer = BadChannelScorer { short_channel_id: 6 };
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route = get_route( &our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 300);
@@ -5794,14 +5948,12 @@ mod tests {
 
                // A path to nodes[6] does not exist if nodes[2] cannot be routed through.
                let scorer = BadNodeScorer { node_id: NodeId::from_pubkey(&nodes[2]) };
-               match get_route(
-                       &our_id, &payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ) {
-                       Err(LightningError { err, .. } ) => {
-                               assert_eq!(err, "Failed to find a path to the given destination");
-                       },
-                       Ok(_) => panic!("Expected error"),
+               match get_route( &our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes) {
+			Err(LightningError { err, .. } ) => {
+				assert_eq!(err, "Failed to find a path to the given destination");
+			},
+			Ok(_) => panic!("Expected error"),
                }
        }
 
@@ -5825,7 +5977,7 @@ mod tests {
                                        short_channel_id: 0, fee_msat: 225, cltv_expiry_delta: 0
                                },
                        ], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
 
                assert_eq!(route.get_total_fees(), 250);
@@ -5858,7 +6010,7 @@ mod tests {
                                        short_channel_id: 0, fee_msat: 150, cltv_expiry_delta: 0
                                },
                        ], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
 
                assert_eq!(route.get_total_fees(), 200);
@@ -5870,7 +6022,7 @@ mod tests {
                // In an earlier version of `Route::get_total_fees` and `Route::get_total_amount`, they
                // would both panic if the route was completely empty. We test to ensure they return 0
 		// here, even though it's somewhat nonsensical as a route.
-               let route = Route { paths: Vec::new(), payment_params: None };
+               let route = Route { paths: Vec::new(), route_params: None };
 
                assert_eq!(route.get_total_fees(), 0);
                assert_eq!(route.get_total_amount(), 0);
@@ -5890,7 +6042,10 @@ mod tests {
                        .with_max_total_cltv_expiry_delta(feasible_max_total_cltv_delta);
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       feasible_payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert_ne!(path.len(), 0);
 
@@ -5898,7 +6053,10 @@ mod tests {
                let fail_max_total_cltv_delta = 23;
                let fail_payment_params = PaymentParameters::from_node_id(nodes[6], 0).with_route_hints(last_hops(&nodes)).unwrap()
                        .with_max_total_cltv_expiry_delta(fail_max_total_cltv_delta);
-               match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       fail_payment_params, 100);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
@@ -5923,9 +6081,16 @@ mod tests {
 
                // We should be able to find a route initially, and then after we fail a few random
                // channels eventually we won't be able to any longer.
-               assert!(get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).is_ok());
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               assert!(get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).is_ok());
                loop {
-                       if let Ok(route) = get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 100);
+                       if let Ok(route) = get_route(&our_id, &route_params, &network_graph, None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
+                       {
                                for chan in route.paths[0].hops.iter() {
                                        assert!(!payment_params.previously_failed_channels.contains(&chan.short_channel_id));
                                }
@@ -5948,15 +6113,19 @@ mod tests {
 
                // First check we can actually create a long route on this graph.
                let feasible_payment_params = PaymentParameters::from_node_id(nodes[18], 0);
-               let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       feasible_payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert!(path.len() == MAX_PATH_LENGTH_ESTIMATE.into());
 
                // But we can't create a path surpassing the MAX_PATH_LENGTH_ESTIMATE limit.
                let fail_payment_params = PaymentParameters::from_node_id(nodes[19], 0);
-               match get_route(&our_id, &fail_payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       fail_payment_params, 100);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
@@ -5975,7 +6144,10 @@ mod tests {
                let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops(&nodes)).unwrap();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 1);
 
                let cltv_expiry_deltas_before = route.paths[0].hops.iter().map(|h| h.cltv_expiry_delta).collect::<Vec<u32>>();
@@ -6009,8 +6181,10 @@ mod tests {
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[4u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
-               let mut route = get_route(&our_id, &payment_params, &network_graph, None, 100,
-                                                                 Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let mut route = get_route(&our_id, &route_params, &network_graph, None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                add_random_cltv_offset(&mut route, &payment_params, &network_graph, &random_seed_bytes);
 
                let mut path_plausibility = vec![];
@@ -6074,8 +6248,9 @@ mod tests {
 
                let payment_params = PaymentParameters::from_node_id(nodes[3], 0);
                let hops = [nodes[1], nodes[2], nodes[4], nodes[3]];
-               let route = build_route_from_hops_internal(&our_id, &hops, &payment_params,
-                        &network_graph, 100, Arc::clone(&logger), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = build_route_from_hops_internal(&our_id, &hops, &route_params, &network_graph,
+                       Arc::clone(&logger), &random_seed_bytes).unwrap();
                let route_hop_pubkeys = route.paths[0].hops.iter().map(|hop| hop.pubkey).collect::<Vec<_>>();
                assert_eq!(hops.len(), route.paths[0].hops.len());
                for (idx, hop_pubkey) in hops.iter().enumerate() {
@@ -6122,7 +6297,10 @@ mod tests {
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
                // 100,000 sats is less than the available liquidity on each channel, set above.
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000_000, Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100_000_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!((route.paths[0].hops[1].short_channel_id == 4 && route.paths[1].hops[1].short_channel_id == 13) ||
                        (route.paths[1].hops[1].short_channel_id == 4 && route.paths[0].hops[1].short_channel_id == 13));
@@ -6223,17 +6401,22 @@ mod tests {
 
                // Then check we can get a normal route
                let payment_params = PaymentParameters::from_node_id(nodes[10], 42);
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_ok());
 
                // Then check that we can't get a route if we ban an intermediate node.
                scorer_params.add_banned(&NodeId::from_pubkey(&nodes[3]));
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_err());
 
                // Finally make sure we can route again, when we remove the ban.
                scorer_params.remove_banned(&NodeId::from_pubkey(&nodes[3]));
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_ok());
        }
 
@@ -6268,8 +6451,10 @@ mod tests {
 
                // Make sure we'll error if our route hints don't have enough liquidity according to their
                // htlc_maximum_msat.
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, max_htlc_msat + 1);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
-                       &payment_params, &netgraph, None, max_htlc_msat + 1, Arc::clone(&logger), &scorer, &(),
+                       &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes)
                {
                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
@@ -6281,8 +6466,10 @@ mod tests {
                let payment_params = PaymentParameters::from_node_id(dest_node_id, 42)
                        .with_route_hints(vec![route_hint_1, route_hint_2]).unwrap()
                        .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
-               let route = get_route(&our_id, &payment_params, &netgraph, None, max_htlc_msat + 1,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, max_htlc_msat + 1);
+               let route = get_route(&our_id, &route_params, &netgraph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
                assert!(route.paths[1].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6335,8 +6522,10 @@ mod tests {
                        .with_route_hints(vec![route_hint_1, route_hint_2]).unwrap()
                        .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
 
-               let route = get_route(&our_node_id, &payment_params, &network_graph.read_only(),
-                       Some(&first_hop.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
+                       Some(&first_hop.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6349,8 +6538,8 @@ mod tests {
                        get_channel_details(Some(42), intermed_node_id, InitFeatures::from_le_bytes(vec![0b11]), amt_msat - 10),
                        get_channel_details(Some(43), intermed_node_id, InitFeatures::from_le_bytes(vec![0b11]), amt_msat - 10),
                ];
-               let route = get_route(&our_node_id, &payment_params, &network_graph.read_only(),
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6379,8 +6568,10 @@ mod tests {
                        (blinded_payinfo.clone(), blinded_path.clone()),
                        (blinded_payinfo.clone(), blinded_path.clone())])
                        .with_bolt12_features(bolt12_features).unwrap();
-               let route = get_route(&our_node_id, &payment_params, &network_graph.read_only(),
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6430,7 +6621,7 @@ mod tests {
                                fee_msat: 100,
                                cltv_expiry_delta: 0,
                        }], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
                let encoded_route = route.encode();
                let decoded_route: Route = Readable::read(&mut Cursor::new(&encoded_route[..])).unwrap();
@@ -6524,7 +6715,7 @@ mod tests {
                                excess_final_cltv_expiry_delta: 0,
                                final_value_msat: 200,
                        }),
-               }], payment_params: None};
+               }], route_params: None};
 
                let payment_params = PaymentParameters::from_node_id(ln_test_utils::pubkey(47), 18);
                let (_, network_graph, _, _, _) = build_line_graph();
@@ -6569,9 +6760,10 @@ mod tests {
                        features: BlindedHopFeatures::empty(),
                };
 
-               let final_amt_msat = 1001;
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo.clone(), blinded_path.clone())]);
-               let route = get_route(&our_id, &payment_params, &network_graph, None, final_amt_msat , Arc::clone(&logger),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 1001);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
                        &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 1);
                assert_eq!(route.paths[0].hops.len(), 2);
@@ -6625,7 +6817,8 @@ mod tests {
                let payment_params = PaymentParameters::blinded(vec![
                        (blinded_payinfo.clone(), invalid_blinded_path.clone()),
                        (blinded_payinfo.clone(), invalid_blinded_path_2)]);
-               match get_route(&our_id, &payment_params, &network_graph, None, 1001, Arc::clone(&logger),
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
                        &scorer, &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
@@ -6636,8 +6829,9 @@ mod tests {
 
                invalid_blinded_path.introduction_node_id = our_id;
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo.clone(), invalid_blinded_path.clone())]);
-               match get_route(&our_id, &payment_params, &network_graph, None, 1001, Arc::clone(&logger),
-                       &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "Cannot generate a route to blinded paths if we are the introduction node to all of them");
@@ -6648,8 +6842,9 @@ mod tests {
                invalid_blinded_path.introduction_node_id = ln_test_utils::pubkey(46);
                invalid_blinded_path.blinded_hops.clear();
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo, invalid_blinded_path)]);
-               match get_route(&our_id, &payment_params, &network_graph, None, 1001, Arc::clone(&logger),
-                       &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "0-hop blinded path provided");
@@ -6701,8 +6896,9 @@ mod tests {
                let payment_params = PaymentParameters::blinded(blinded_hints.clone())
                        .with_bolt12_features(bolt12_features.clone()).unwrap();
 
-               let route = get_route(&our_id, &payment_params, &network_graph, None,
-                       100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100_000);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                let mut total_amount_paid_msat = 0;
                for path in route.paths.into_iter() {
@@ -6788,17 +6984,21 @@ mod tests {
                let payment_params = PaymentParameters::blinded(blinded_hints.clone());
 
                let netgraph = network_graph.read_only();
-               if let Err(LightningError { err, .. }) = get_route(&nodes[0], &payment_params, &netgraph,
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), amt_msat);
+               if let Err(LightningError { err, .. }) = get_route(&nodes[0], &route_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+			assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!("Expected error") }
 
                // Sending an exact amount accounting for the blinded path fee works.
                let amt_minus_blinded_path_fee = amt_msat - blinded_payinfo.fee_base_msat as u64;
-               let route = get_route(&nodes[0], &payment_params, &netgraph,
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_minus_blinded_path_fee,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_minus_blinded_path_fee);
+               let route = get_route(&nodes[0], &route_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_minus_blinded_path_fee);
        }
@@ -6863,9 +7063,11 @@ mod tests {
                        .with_bolt12_features(bolt12_features.clone()).unwrap();
 
                let netgraph = network_graph.read_only();
-               let route = get_route(&nodes[0], &payment_params, &netgraph,
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(&nodes[0], &route_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_msat);
        }
@@ -6880,7 +7082,9 @@ pub(crate) mod bench_utils {
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
 
        use crate::chain::transaction::OutPoint;
+       use crate::routing::scoring::ScoreUpdate;
        use crate::sign::{EntropySource, KeysManager};
+       use crate::ln::ChannelId;
        use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
        use crate::ln::features::Bolt11InvoiceFeatures;
        use crate::routing::gossip::NetworkGraph;
@@ -6934,7 +7138,7 @@ pub(crate) mod bench_utils {
        #[inline]
        pub(crate) fn first_hop(node_id: PublicKey) -> ChannelDetails {
                ChannelDetails {
-                       channel_id: [0; 32],
+                       channel_id: ChannelId::new_zero(),
                        counterparty: ChannelCounterparty {
                                features: channelmanager::provided_init_features(&UserConfig::default()),
                                node_id,
@@ -6972,7 +7176,7 @@ pub(crate) mod bench_utils {
                }
        }
 
-       pub(crate) fn generate_test_routes<S: Score>(graph: &NetworkGraph<&TestLogger>, scorer: &mut S,
+       pub(crate) fn generate_test_routes<S: ScoreLookUp + ScoreUpdate>(graph: &NetworkGraph<&TestLogger>, scorer: &mut S,
                score_params: &S::ScoreParams, features: Bolt11InvoiceFeatures, mut seed: u64,
                starting_amount: u64, route_count: usize,
        ) -> Vec<(ChannelDetails, PaymentParameters, u64)> {
@@ -6995,10 +7199,12 @@ pub(crate) mod bench_utils {
                                let params = PaymentParameters::from_node_id(dst, 42)
                                        .with_bolt11_features(features.clone()).unwrap();
                                let first_hop = first_hop(src);
-                               let amt = starting_amount + seed % 1_000_000;
+                               let amt_msat = starting_amount + seed % 1_000_000;
+                               let route_params = RouteParameters::from_payment_params_and_value(
+                                       params.clone(), amt_msat);
                                let path_exists =
-                                       get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]),
-                                               amt, &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok();
+                                       get_route(&payer, &route_params, &graph.read_only(), Some(&[&first_hop]),
+                                               &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok();
                                if path_exists {
                                        // ...and seed the scorer with success and failure data...
                                        seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
@@ -7010,10 +7216,11 @@ pub(crate) mod bench_utils {
                                                let mpp_features = channelmanager::provided_invoice_features(&UserConfig::default());
                                                let params = PaymentParameters::from_node_id(dst, 42)
                                                        .with_bolt11_features(mpp_features).unwrap();
-
-                                               let route_res = get_route(&payer, &params, &graph.read_only(),
-                                                       Some(&[&first_hop]), score_amt, &TestLogger::new(), &scorer,
-                                                       score_params, &random_seed_bytes);
+                                               let route_params = RouteParameters::from_payment_params_and_value(
+                                                       params.clone(), score_amt);
+                                               let route_res = get_route(&payer, &route_params, &graph.read_only(),
+                                                       Some(&[&first_hop]), &TestLogger::new(), scorer, score_params,
+                                                       &random_seed_bytes);
                                                if let Ok(route) = route_res {
                                                        for path in route.paths {
                                                                if seed & 0x80 == 0 {
@@ -7030,7 +7237,7 @@ pub(crate) mod bench_utils {
                                                score_amt /= 100;
                                        }
 
-                                       route_endpoints.push((first_hop, params, amt));
+                                       route_endpoints.push((first_hop, params, amt_msat));
                                        break;
                                }
                        }
@@ -7039,9 +7246,11 @@ pub(crate) mod bench_utils {
                // Because we've changed channel scores, it's possible we'll take different routes to the
                // selected destinations, possibly causing us to fail because, eg, the newly-selected path
                // requires a too-high CLTV delta.
-               route_endpoints.retain(|(first_hop, params, amt)| {
-                       get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
-                               &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok()
+               route_endpoints.retain(|(first_hop, params, amt_msat)| {
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               params.clone(), *amt_msat);
+                       get_route(&payer, &route_params, &graph.read_only(), Some(&[first_hop]),
+                               &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok()
                });
                route_endpoints.truncate(route_count);
                assert_eq!(route_endpoints.len(), route_count);
@@ -7052,6 +7261,7 @@ pub(crate) mod bench_utils {
 #[cfg(ldk_bench)]
 pub mod benches {
        use super::*;
+       use crate::routing::scoring::{ScoreUpdate, ScoreLookUp};
        use crate::sign::{EntropySource, KeysManager};
        use crate::ln::channelmanager;
        use crate::ln::features::Bolt11InvoiceFeatures;
@@ -7114,7 +7324,7 @@ pub mod benches {
                        "generate_large_mpp_routes_with_probabilistic_scorer");
        }
 
-       fn generate_routes<S: Score>(
+       fn generate_routes<S: ScoreLookUp + ScoreUpdate>(
                bench: &mut Criterion, graph: &NetworkGraph<&TestLogger>, mut scorer: S,
                score_params: &S::ScoreParams, features: Bolt11InvoiceFeatures, starting_amount: u64,
                bench_name: &'static str,
@@ -7130,7 +7340,8 @@ pub mod benches {
                let mut idx = 0;
                bench.bench_function(bench_name, |b| b.iter(|| {
                        let (first_hop, params, amt) = &route_endpoints[idx % route_endpoints.len()];
-                       assert!(get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+                       let route_params = RouteParameters::from_payment_params_and_value(params.clone(), *amt);
+                       assert!(get_route(&payer, &route_params, &graph.read_only(), Some(&[first_hop]),
                                &DummyLogger{}, &scorer, score_params, &random_seed_bytes).is_ok());
                        idx += 1;
                }));
diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index 5fdbf9ae3a9c8eff91ba0948ec86d8f9f3f7bdec..26d555819d35c15e2f8aff22b0667d5e6ba49500 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -10,7 +10,7 @@
 //! Utilities for scoring payment channels.
 //!
 //! [`ProbabilisticScorer`] may be given to [`find_route`] to score payment channels during path
-//! finding when a custom [`Score`] implementation is not needed.
+//! finding when a custom [`ScoreLookUp`] implementation is not needed.
 //!
 //! # Example
 //!
@@ -65,12 +65,12 @@ use crate::util::time::Time;
 
 use crate::prelude::*;
 use core::{cmp, fmt};
-use core::cell::{RefCell, RefMut};
+use core::cell::{RefCell, RefMut, Ref};
 use core::convert::TryInto;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 use crate::io::{self, Read};
-use crate::sync::{Mutex, MutexGuard};
+use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
 
 /// We define Score ever-so-slightly differently based on whether we are being built for C bindings
 /// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
@@ -86,8 +86,10 @@ use crate::sync::{Mutex, MutexGuard};
 macro_rules! define_score { ($($supertrait: path)*) => {
 /// An interface used to score payment channels for path finding.
 ///
-///    Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
-pub trait Score $(: $supertrait)* {
+/// `ScoreLookUp` is used to determine the penalty for a given channel.
+///
+/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
+pub trait ScoreLookUp $(: $supertrait)* {
        /// A configurable type which should contain various passed-in parameters for configuring the scorer,
        /// on a per-routefinding-call basis through to the scorer methods,
        /// which are used to determine the parameters for the suitability of channels for use.
@@ -103,7 +105,10 @@ pub trait Score $(: $supertrait)* {
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64;
+}
 
+/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
+pub trait ScoreUpdate $(: $supertrait)* {
        /// Handles updating channel penalties after failing to route through a channel.
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
 
@@ -117,14 +122,16 @@ pub trait Score $(: $supertrait)* {
        fn probe_successful(&mut self, path: &Path);
 }
 
-impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
-       type ScoreParams = S::ScoreParams;
+impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supertrait)*> ScoreLookUp for T {
+       type ScoreParams = SP;
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64 {
                self.deref().channel_penalty_msat(short_channel_id, source, target, usage, score_params)
        }
+}
 
+impl<S: ScoreUpdate, T: DerefMut<Target=S> $(+ $supertrait)*> ScoreUpdate for T {
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
                self.deref_mut().payment_path_failed(path, short_channel_id)
        }
@@ -145,26 +152,35 @@ impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
 
 #[cfg(c_bindings)]
 define_score!(Writeable);
+
 #[cfg(not(c_bindings))]
 define_score!();
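With the read half (`ScoreLookUp`) now split from the write half (`ScoreUpdate`), a custom scorer implements the two traits separately. A minimal sketch mirroring the test scorers elsewhere in this patch (the `AvoidScidScorer` name and its single field are hypothetical, and the c_bindings `Writeable` supertrait is omitted):

struct AvoidScidScorer { avoid_scid: u64 }

impl ScoreLookUp for AvoidScidScorer {
	type ScoreParams = ();
	fn channel_penalty_msat(&self, short_channel_id: u64, _source: &NodeId, _target: &NodeId,
		_usage: ChannelUsage, _score_params: &Self::ScoreParams
	) -> u64 {
		// Route-finding only reads: effectively ban one channel, leave the rest unpenalized.
		if short_channel_id == self.avoid_scid { u64::max_value() } else { 0 }
	}
}

impl ScoreUpdate for AvoidScidScorer {
	// Payment and probe results only mutate state; this toy scorer ignores them.
	fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
	fn payment_path_successful(&mut self, _path: &Path) {}
	fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
	fn probe_successful(&mut self, _path: &Path) {}
}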
 
 /// A scorer that is accessed under a lock.
 ///
-/// Needed so that calls to [`Score::channel_penalty_msat`] in [`find_route`] can be made while
-/// having shared ownership of a scorer but without requiring internal locking in [`Score`]
+/// Needed so that calls to [`ScoreLookUp::channel_penalty_msat`] in [`find_route`] can be made while
+/// having shared ownership of a scorer but without requiring internal locking in [`ScoreUpdate`]
 /// implementations. Internal locking would be detrimental to route finding performance and could
-/// result in [`Score::channel_penalty_msat`] returning a different value for the same channel.
+/// result in [`ScoreLookUp::channel_penalty_msat`] returning a different value for the same channel.
 ///
 /// [`find_route`]: crate::routing::router::find_route
 pub trait LockableScore<'a> {
-       /// The [`Score`] type.
-       type Score: 'a + Score;
+       /// The [`ScoreUpdate`] type.
+       type ScoreUpdate: 'a + ScoreUpdate;
+       /// The [`ScoreLookUp`] type.
+       type ScoreLookUp: 'a + ScoreLookUp;
+
+       /// The write locked [`ScoreUpdate`] type.
+       type WriteLocked: DerefMut<Target = Self::ScoreUpdate> + Sized;
 
-       /// The locked [`Score`] type.
-       type Locked: DerefMut<Target = Self::Score> + Sized;
+       /// The read locked [`ScoreLookUp`] type.
+       type ReadLocked: Deref<Target = Self::ScoreLookUp> + Sized;
 
-       /// Returns the locked scorer.
-       fn lock(&'a self) -> Self::Locked;
+       /// Returns read locked scorer.
+       fn read_lock(&'a self) -> Self::ReadLocked;
+
+       /// Returns write locked scorer.
+       fn write_lock(&'a self) -> Self::WriteLocked;
 }
 
 /// Refers to a scorer that is accessible under lock and also writeable to disk
@@ -176,89 +192,138 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
-       type Score = T;
-       type Locked = MutexGuard<'a, T>;
+impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+
+       type WriteLocked = MutexGuard<'a, Self::ScoreUpdate>;
+       type ReadLocked = MutexGuard<'a, Self::ScoreLookUp>;
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               Mutex::lock(self).unwrap()
+       }
 
-       fn lock(&'a self) -> Self::Locked {
+       fn write_lock(&'a self) -> Self::WriteLocked {
                Mutex::lock(self).unwrap()
        }
 }
 
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + Score> LockableScore<'a> for RefCell<T> {
-       type Score = T;
-       type Locked = RefMut<'a, T>;
+impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
 
-       fn lock(&'a self) -> Self::Locked {
+       type WriteLocked = RefMut<'a, Self::ScoreUpdate>;
+       type ReadLocked = Ref<'a, Self::ScoreLookUp>;
+
+       fn write_lock(&'a self) -> Self::WriteLocked {
                self.borrow_mut()
        }
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               self.borrow()
+       }
+}
+
+#[cfg(not(c_bindings))]
+impl<'a, SP: Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> LockableScore<'a> for RwLock<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+
+	type WriteLocked = RwLockWriteGuard<'a, Self::ScoreUpdate>;
+	type ReadLocked = RwLockReadGuard<'a, Self::ScoreLookUp>;
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               RwLock::read(self).unwrap()
+       }
+
+       fn write_lock(&'a self) -> Self::WriteLocked {
+               RwLock::write(self).unwrap()
+       }
 }
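In practice the split means route-finding can hold only a shared lock while payment results take the exclusive one. A usage sketch against the `RwLock` implementation above (`source`, `target`, `usage` and `path` are placeholder values of the corresponding `NodeId`, `ChannelUsage` and `Path` types):

let scorer = RwLock::new(FixedPenaltyScorer::with_penalty(500));
// find_route only needs the ScoreLookUp half, so a read lock is enough...
let penalty = scorer.read_lock().channel_penalty_msat(42, &source, &target, usage, &());
// ...while recording a payment outcome goes through ScoreUpdate under the write lock.
scorer.write_lock().payment_path_failed(&path, 42);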
 
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<T: Score> {
-       score: Mutex<T>,
+pub struct MultiThreadedLockableScore<T: ScoreLookUp + ScoreUpdate> {
+       score: RwLock<T>,
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> LockableScore<'a> for MultiThreadedLockableScore<T> {
-       type Score = T;
-       type Locked = MultiThreadedScoreLock<'a, T>;
+impl<'a, SP: Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+       type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
+       type ReadLocked = MultiThreadedScoreLockRead<'a, Self::ScoreLookUp>;
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               MultiThreadedScoreLockRead(self.score.read().unwrap())
+       }
 
-       fn lock(&'a self) -> Self::Locked {
-               MultiThreadedScoreLock(Mutex::lock(&self.score).unwrap())
+       fn write_lock(&'a self) -> Self::WriteLocked {
+               MultiThreadedScoreLockWrite(self.score.write().unwrap())
        }
 }
 
 #[cfg(c_bindings)]
-impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
+impl<T: ScoreUpdate + ScoreLookUp> Writeable for MultiThreadedLockableScore<T> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               self.lock().write(writer)
+               self.score.read().unwrap().write(writer)
        }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
 
 #[cfg(c_bindings)]
-impl<T: Score> MultiThreadedLockableScore<T> {
+impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
        /// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
        pub fn new(score: T) -> Self {
-               MultiThreadedLockableScore { score: Mutex::new(score) }
+               MultiThreadedLockableScore { score: RwLock::new(score) }
        }
 }
 
 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLock<'a, T: Score>(MutexGuard<'a, T>);
+pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> Writeable for MultiThreadedScoreLock<'a, T> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               self.0.write(writer)
+/// A locked `MultiThreadedLockableScore`.
+pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
+
+#[cfg(c_bindings)]
+impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
+       type Target = T;
+
+       fn deref(&self) -> &Self::Target {
+               self.0.deref()
        }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLock<'a, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        self.0.deref_mut()
-    }
+impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               self.0.write(writer)
+       }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLock<'a, T> {
+impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
        type Target = T;
 
-    fn deref(&self) -> &Self::Target {
-        self.0.deref()
-    }
+       fn deref(&self) -> &Self::Target {
+               self.0.deref()
+       }
 }
 
+#[cfg(c_bindings)]
+impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
+       fn deref_mut(&mut self) -> &mut Self::Target {
+               self.0.deref_mut()
+       }
+}
 
 
-/// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
+/// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct ChannelUsage {
        /// The amount to send through the channel, denominated in millisatoshis.
@@ -273,7 +338,7 @@ pub struct ChannelUsage {
 }
 
 #[derive(Clone)]
-/// [`Score`] implementation that uses a fixed penalty.
+/// [`ScoreLookUp`] implementation that uses a fixed penalty.
 pub struct FixedPenaltyScorer {
        penalty_msat: u64,
 }
@@ -285,12 +350,14 @@ impl FixedPenaltyScorer {
        }
 }
 
-impl Score for FixedPenaltyScorer {
+impl ScoreLookUp for FixedPenaltyScorer {
        type ScoreParams = ();
        fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
                self.penalty_msat
        }
+}
 
+impl ScoreUpdate for FixedPenaltyScorer {
        fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
 
        fn payment_path_successful(&mut self, _path: &Path) {}
@@ -323,7 +390,7 @@ use crate::util::time::Eternity;
 #[cfg(feature = "no-std")]
 type ConfiguredTime = Eternity;
 
-/// [`Score`] implementation using channel success probability distributions.
+/// [`ScoreLookUp`] implementation using channel success probability distributions.
 ///
 /// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
 /// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
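A concrete instance of that bound-learning (a sketch; the path value is a placeholder and the call is the `ScoreUpdate` method introduced in this patch): if a 1_000_000 msat payment fails at SCID 42, the scorer records that channel 42's available-liquidity upper bound now sits below 1_000_000 msat.

scorer.payment_path_failed(&path_paying_1_000_000_msat, 42);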
@@ -361,7 +428,7 @@ type ConfiguredTime = Eternity;
 /// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
 pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 
-/// Probabilistic [`Score`] implementation.
+/// Probabilistic [`ScoreLookUp`] implementation.
 ///
 /// This is not exported to bindings users generally all users should use the [`ProbabilisticScorer`] type alias.
 pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
@@ -1142,7 +1209,7 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
        }
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        type ScoreParams = ProbabilisticScoringFeeParameters;
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
@@ -1185,7 +1252,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
                        .saturating_add(anti_probing_penalty_msat)
                        .saturating_add(base_penalty_msat)
        }
+}
 
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
                let amount_msat = path.final_value_msat();
                log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
@@ -1972,7 +2041,7 @@ mod tests {
        use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
        use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
        use crate::routing::router::{BlindedTail, Path, RouteHop};
-       use crate::routing::scoring::{ChannelUsage, Score};
+       use crate::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};
        use crate::util::ser::{ReadableArgs, Writeable};
        use crate::util::test_utils::{self, TestLogger};
 
diff --git a/lightning/src/util/base32.rs b/lightning/src/util/base32.rs
new file mode 100644
index 0000000..2e66d59
--- /dev/null
+++ b/lightning/src/util/base32.rs
@@ -0,0 +1,273 @@
+// This is a modification of base32 encoding to support the zbase32 alphabet.
+// The original piece of software can be found at https://crates.io/crates/base32(v0.4.0)
+// The original portions of this software are Copyright (c) 2015 The base32 Developers
+
+// This file is licensed under either of
+// Apache License, Version 2.0, (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
+// MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT) at your option.
+
+
+use crate::prelude::*;
+
+/// RFC4648 encoding table
+const RFC4648_ALPHABET: &'static [u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";
+
+/// Zbase encoding alphabet
+const ZBASE_ALPHABET: &'static [u8] = b"ybndrfg8ejkmcpqxot1uwisza345h769";
+
+/// RFC4648 decoding table
+const RFC4648_INV_ALPHABET: [i8; 43] = [
+       -1, -1, 26, 27, 28, 29, 30, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8,
+       9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+];
+
+/// Zbase decoding table
+const ZBASE_INV_ALPHABET: [i8; 43] = [
+       -1, 18, -1, 25, 26, 27, 30, 29, 7, 31, -1, -1, -1, -1, -1, -1, -1, 24, 1, 12, 3, 8, 5, 6, 28,
+       21, 9, 10, -1, 11, 2, 16, 13, 14, 4, 22, 17, 19, -1, 20, 15, 0, 23,
+];
+
+/// Alphabet used for encoding and decoding.
+#[derive(Copy, Clone)]
+pub enum Alphabet {
+       /// RFC4648 encoding.
+       RFC4648 {
+               /// Whether to use padding.
+               padding: bool
+       },
+       /// Zbase32 encoding.
+       ZBase32
+}
+
+impl Alphabet {
+       /// Encode bytes into a base32 string.
+       pub fn encode(&self, data: &[u8]) -> String {
+               // output_length is calculated as follows:
+               // / 5 divides the data length by the number of bits per chunk (5),
+		// * 8 converts the length in bytes to a length in bits,
+		// + 4 and / 5 then round that bit count up to the number of 5-bit characters.
+               let mut ret = match self {
+                       Self::RFC4648 { padding } => {
+                               let mut ret = Self::encode_data(data, RFC4648_ALPHABET);
+                               if *padding {
+                                       let len = ret.len();
+                                       for i in output_length..len {
+                                               ret[i] = b'=';
+                                       }
+
+                                       return String::from_utf8(ret).expect("Invalid UTF-8");
+                               }
+                               ret
+                       },
+                       Self::ZBase32 => {
+                               Self::encode_data(data, ZBASE_ALPHABET)
+                       },
+               };
+               ret.truncate(output_length);
+
+               #[cfg(fuzzing)]
+               assert_eq!(ret.capacity(), (data.len() + 4) / 5 * 8);
+
+               String::from_utf8(ret).expect("Invalid UTF-8")
+       }
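To make the length bookkeeping concrete (a sketch using only the API defined in this file): two input bytes are 16 bits, which need (2 * 8 + 4) / 5 = 4 characters, while RFC4648 with padding always emits whole 8-character blocks.

assert_eq!(Alphabet::RFC4648 { padding: false }.encode(&[0xFF, 0x00]).len(), 4);
assert_eq!(Alphabet::RFC4648 { padding: true }.encode(&[0xFF, 0x00]).len(), 8);
assert_eq!(Alphabet::ZBase32.encode(&[0xFF, 0x00]).len(), 4);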
+
+       /// Decode a base32 string into a byte vector.
+       pub fn decode(&self, data: &str) -> Result<Vec<u8>, ()> {
+               let data = data.as_bytes();
+               let (data, alphabet) = match self {
+                       Self::RFC4648 { padding } => {
+                               let mut unpadded_data_length = data.len();
+                               if *padding {
+                                       if data.len() % 8 != 0 { return Err(()); }
+                                       data.iter().rev().take(6).for_each(|&c| {
+                                               if c == b'=' {
+                                                       unpadded_data_length -= 1;
+                                               }
+                                       });
+                               }
+                               (&data[..unpadded_data_length], RFC4648_INV_ALPHABET)
+                       },
+                       Self::ZBase32 => {
+                               (data, ZBASE_INV_ALPHABET)
+                       }
+               };
+               // A string whose length is 1, 3, or 6 (mod 8) cannot be produced by encoding any whole
+               // number of bytes, so treat such strings as invalid.
+               match data.len() % 8 { 1|3|6 => return Err(()), _ => {} }
+               Ok(Self::decode_data(data, alphabet)?)
+       }
+
+       /// Encode a byte slice into base32-encoded bytes using the given alphabet.
+       fn encode_data(data: &[u8], alphabet: &'static [u8]) -> Vec<u8> {
+               // cap is calculated as follows:
+               // / 5 divides the data length by the number of bytes per chunk (5),
+               // * 8 multiplies the result by the number of characters per chunk (8).
+               // + 4 (added before dividing) rounds up to the nearest whole chunk.
+               let cap = (data.len() + 4) / 5 * 8;
+               let mut ret = Vec::with_capacity(cap);
+               for chunk in data.chunks(5) {
+                       let mut buf = [0u8; 5];
+                       for (i, &b) in chunk.iter().enumerate() {
+                               buf[i] = b;
+                       }
+                       ret.push(alphabet[((buf[0] & 0xF8) >> 3) as usize]);
+                       ret.push(alphabet[(((buf[0] & 0x07) << 2) | ((buf[1] & 0xC0) >> 6)) as usize]);
+                       ret.push(alphabet[((buf[1] & 0x3E) >> 1) as usize]);
+                       ret.push(alphabet[(((buf[1] & 0x01) << 4) | ((buf[2] & 0xF0) >> 4)) as usize]);
+                       ret.push(alphabet[(((buf[2] & 0x0F) << 1) | (buf[3] >> 7)) as usize]);
+                       ret.push(alphabet[((buf[3] & 0x7C) >> 2) as usize]);
+                       ret.push(alphabet[(((buf[3] & 0x03) << 3) | ((buf[4] & 0xE0) >> 5)) as usize]);
+                       ret.push(alphabet[(buf[4] & 0x1F) as usize]);
+               }
+               #[cfg(fuzzing)]
+               assert_eq!(ret.capacity(), cap);
+
+               ret
+       }
+
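+       /// Decode base32-encoded bytes back into the original byte vector using the given inverse alphabet.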
+       fn decode_data(data: &[u8], alphabet: [i8; 43]) -> Result<Vec<u8>, ()> {
+               // cap is calculated as follows:
+               // / 8 divides the data length by the number of characters per chunk (8),
+               // * 5 multiplies the result by the number of bytes per chunk (5).
+               // + 7 (added before dividing) rounds up to the nearest whole chunk.
+               let cap = (data.len() + 7) / 8 * 5;
+               let mut ret = Vec::with_capacity(cap);
+               for chunk in data.chunks(8) {
+                       let mut buf = [0u8; 8];
+                       for (i, &c) in chunk.iter().enumerate() {
+                               match alphabet.get(c.to_ascii_uppercase().wrapping_sub(b'0') as usize) {
+                                       Some(&-1) | None => return Err(()),
+                                       Some(&value) => buf[i] = value as u8,
+                               };
+                       }
+                       ret.push((buf[0] << 3) | (buf[1] >> 2));
+                       ret.push((buf[1] << 6) | (buf[2] << 1) | (buf[3] >> 4));
+                       ret.push((buf[3] << 4) | (buf[4] >> 1));
+                       ret.push((buf[4] << 7) | (buf[5] << 2) | (buf[6] >> 3));
+                       ret.push((buf[6] << 5) | buf[7]);
+               }
+               let output_length = data.len() * 5 / 8;
+               for c in ret.drain(output_length..) {
+                       if c != 0 {
+                               // If the original string had any bits set at positions outside of the encoded data,
+                               // treat the string as invalid.
+                               return Err(());
+                       }
+               }
+
+               // Check that our capacity calculation doesn't under-shoot in fuzzing
+               #[cfg(fuzzing)]
+               assert_eq!(ret.capacity(), cap);
+               Ok(ret)
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use super::*;
+
+       const ZBASE32_TEST_DATA: &[(&str, &[u8])] = &[
+               ("", &[]),
+               ("yy", &[0x00]),
+               ("oy", &[0x80]),
+               ("tqrey", &[0x8b, 0x88, 0x80]),
+               ("6n9hq", &[0xf0, 0xbf, 0xc7]),
+               ("4t7ye", &[0xd4, 0x7a, 0x04]),
+               ("6im5sdy", &[0xf5, 0x57, 0xbb, 0x0c]),
+               ("ybndrfg8ejkmcpqxot1uwisza345h769", &[0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6,
+               0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, 0xd7, 0xc6,
+               0x75, 0xbe, 0x77, 0xdf])
+       ];
+
+       #[test]
+       fn test_zbase32_encode() {
+               for &(zbase32, data) in ZBASE32_TEST_DATA {
+                       assert_eq!(Alphabet::ZBase32.encode(data), zbase32);
+               }
+       }
+
+       #[test]
+       fn test_zbase32_decode() {
+               for &(zbase32, data) in ZBASE32_TEST_DATA {
+                       assert_eq!(Alphabet::ZBase32.decode(zbase32).unwrap(), data);
+               }
+       }
+
+       #[test]
+       fn test_decode_wrong() {
+               const WRONG_DATA: &[&str] = &["00", "l1", "?", "="];
+               for &data in WRONG_DATA {
+                       match Alphabet::ZBase32.decode(data) {
+                               Ok(_) => assert!(false, "Data shouldn't be decodable"),
+                               Err(_) => assert!(true),
+                       }
+               }
+       }
+
+       const RFC4648_NON_PADDED_TEST_VECTORS: &[(&[u8], &[u8])] = &[
+               (&[0xF8, 0x3E, 0x7F, 0x83, 0xE7], b"7A7H7A7H"),
+               (&[0x77, 0xC1, 0xF7, 0x7C, 0x1F], b"O7A7O7A7"),
+               (&[0xF8, 0x3E, 0x7F, 0x83, 0xE7], b"7A7H7A7H"),
+               (&[0x77, 0xC1, 0xF7, 0x7C, 0x1F], b"O7A7O7A7"),
+       ];
+
+       const RFC4648_TEST_VECTORS: &[(&[u8], &str)] = &[
+               (b"", ""),
+               (b"f", "MY======"),
+               (b"fo", "MZXQ===="),
+               (b"foo", "MZXW6==="),
+               (b"foob", "MZXW6YQ="),
+               (b"fooba", "MZXW6YTB"),
+               (b"foobar", "MZXW6YTBOI======"),
+               (&[0xF8, 0x3E, 0x7F, 0x83], "7A7H7AY="),
+       ];
+
+       #[test]
+       fn test_rfc4648_encode() {
+               for (input, encoded) in RFC4648_TEST_VECTORS {
+                       assert_eq!(&Alphabet::RFC4648 { padding: true }.encode(input), encoded);
+               }
+
+               for (input, encoded) in RFC4648_NON_PADDED_TEST_VECTORS {
+                       assert_eq!(&Alphabet::RFC4648 { padding: false }.encode(input).as_bytes(), encoded);
+               }
+       }
+
+       #[test]
+       fn test_rfc4648_decode() {
+               for (input, encoded) in RFC4648_TEST_VECTORS {
+                       let res = &Alphabet::RFC4648 { padding: true }.decode(encoded).unwrap();
+                       assert_eq!(&res[..], &input[..]);
+               }
+
+               for (input, encoded) in RFC4648_NON_PADDED_TEST_VECTORS {
+                       let res = &Alphabet::RFC4648 { padding: false }.decode(std::str::from_utf8(encoded).unwrap()).unwrap();
+                       assert_eq!(&res[..], &input[..]);
+               }
+       }
+
+       #[test]
+       fn padding() {
+               let num_padding = [0, 6, 4, 3, 1];
+               for i in 1..6 {
+                       let encoded = Alphabet::RFC4648 { padding: true }.encode(
+                               (0..(i as u8)).collect::<Vec<u8>>().as_ref()
+                       );
+                       assert_eq!(encoded.len(), 8);
+                       for j in 0..(num_padding[i % 5]) {
+                               assert_eq!(encoded.as_bytes()[encoded.len() - j - 1], b'=');
+                       }
+                       for j in 0..(8 - num_padding[i % 5]) {
+                               assert!(encoded.as_bytes()[j] != b'=');
+                       }
+               }
+       }
+
+       #[test]
+       fn test_decode_rfc4648_errors() {
+               assert!(Alphabet::RFC4648 { padding: false }.decode("abc2def===").is_err()); // Invalid char because padding is disabled
+               assert!(Alphabet::RFC4648 { padding: true }.decode("abc2def===").is_err()); // Invalid length
+               assert!(Alphabet::RFC4648 { padding: true }.decode("MZX=6YTB").is_err()); // Invalid char
+       }
+}
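For orientation, a brief usage sketch of the new `Alphabet` API (illustrative calling code, not part of the diff; outside of fuzzing the module is `pub(crate)`, so this only compiles from within the crate). The vectors are taken from the test data above.

	use crate::util::base32::Alphabet;

	fn base32_usage_sketch() {
		// zbase32, as used for Lightning message signatures (see message_signing.rs below).
		assert_eq!(Alphabet::ZBase32.encode(&[0xf0, 0xbf, 0xc7]), "6n9hq");
		assert_eq!(&Alphabet::ZBase32.decode("6n9hq").unwrap()[..], &[0xf0u8, 0xbf, 0xc7][..]);

		// RFC4648 with padding pads the output up to a multiple of 8 characters.
		assert_eq!(Alphabet::RFC4648 { padding: true }.encode(b"foob"), "MZXW6YQ=");
		assert_eq!(&Alphabet::RFC4648 { padding: true }.decode("MZXW6YQ=").unwrap()[..], &b"foob"[..]);
	}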
index c729e6847470e024858911793b9273592b0f7fb6..f46b344f2ce144c235e40e041743e707570e43a5 100644 (file)
@@ -159,6 +159,30 @@ mod real_chacha {
                        chacha_bytes
                }
 
+               /// Encrypts `src` into `dest` using a single block from a ChaCha stream. Passing `dest` as
+               /// `src` in a second call will decrypt it.
+               pub fn encrypt_single_block(
+                       key: &[u8; 32], nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
+               ) {
+                       debug_assert_eq!(dest.len(), src.len());
+                       debug_assert!(dest.len() <= 32);
+
+                       let block = ChaCha20::get_single_block(key, nonce);
+                       for i in 0..dest.len() {
+                               dest[i] = block[i] ^ src[i];
+                       }
+               }
+
+               /// Same as `encrypt_single_block`, but operates on a fixed-size input in-place.
+               pub fn encrypt_single_block_in_place(
+                       key: &[u8; 32], nonce: &[u8; 16], bytes: &mut [u8; 32]
+               ) {
+                       let block = ChaCha20::get_single_block(key, nonce);
+                       for i in 0..bytes.len() {
+                               bytes[i] = block[i] ^ bytes[i];
+                       }
+               }
+
                fn expand(key: &[u8], nonce: &[u8]) -> ChaChaState {
                        let constant = match key.len() {
                                16 => b"expand 16-byte k",
@@ -290,6 +314,17 @@ mod fuzzy_chacha {
                        [0; 32]
                }
 
+               pub fn encrypt_single_block(
+                       _key: &[u8; 32], _nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
+               ) {
+                       debug_assert_eq!(dest.len(), src.len());
+                       debug_assert!(dest.len() <= 32);
+               }
+
+               pub fn encrypt_single_block_in_place(
+                       _key: &[u8; 32], _nonce: &[u8; 16], _bytes: &mut [u8; 32]
+               ) {}
+
                pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
                        output.copy_from_slice(input);
                }
@@ -618,4 +653,49 @@ mod test {
 
                assert_eq!(ChaCha20::get_single_block(&key, &nonce_16bytes), block_bytes);
        }
+
+       #[test]
+       fn encrypt_single_block() {
+               let key = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+               ];
+               let nonce = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+                       0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+               ];
+               let bytes = [1; 32];
+
+               let mut encrypted_bytes = [0; 32];
+               ChaCha20::encrypt_single_block(&key, &nonce, &mut encrypted_bytes, &bytes);
+
+               let mut decrypted_bytes = [0; 32];
+               ChaCha20::encrypt_single_block(&key, &nonce, &mut decrypted_bytes, &encrypted_bytes);
+
+               assert_eq!(bytes, decrypted_bytes);
+       }
+
+       #[test]
+       fn encrypt_single_block_in_place() {
+               let key = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+               ];
+               let nonce = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+                       0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+               ];
+               let unencrypted_bytes = [1; 32];
+               let mut bytes = unencrypted_bytes;
+
+               ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut bytes);
+               assert_ne!(bytes, unencrypted_bytes);
+
+               ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut bytes);
+               assert_eq!(bytes, unencrypted_bytes);
+       }
 }
index 617f71e42c6854cb5d106e7a20d7543b14332e3b..cdd00d92af9c5f3cd0a2fe3a61c3447310a0bba2 100644 (file)
@@ -24,7 +24,7 @@ macro_rules! hkdf_extract_expand {
                let (k1, k2, _) = hkdf_extract_expand!($salt, $ikm);
                (k1, k2)
        }};
-       ($salt: expr, $ikm: expr, 4) => {{
+       ($salt: expr, $ikm: expr, 5) => {{
                let (k1, k2, prk) = hkdf_extract_expand!($salt, $ikm);
 
                let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
@@ -35,7 +35,14 @@ macro_rules! hkdf_extract_expand {
                let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
                hmac.input(&k3);
                hmac.input(&[4; 1]);
-               (k1, k2, k3, Hmac::from_engine(hmac).into_inner())
+               let k4 = Hmac::from_engine(hmac).into_inner();
+
+               let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+               hmac.input(&k4);
+               hmac.input(&[5; 1]);
+               let k5 = Hmac::from_engine(hmac).into_inner();
+
+               (k1, k2, k3, k4, k5)
        }}
 }
 
@@ -43,8 +50,8 @@ pub fn hkdf_extract_expand_twice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32]
        hkdf_extract_expand!(salt, ikm, 2)
 }
 
-pub fn hkdf_extract_expand_4x(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32], [u8; 32]) {
-       hkdf_extract_expand!(salt, ikm, 4)
+pub fn hkdf_extract_expand_5x(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32], [u8; 32], [u8; 32]) {
+       hkdf_extract_expand!(salt, ikm, 5)
 }
 
 #[inline]
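Each subsequent key in the chain above is derived by HMACing the previous key plus a one-byte counter under the extracted PRK, so extending the macro from four to five outputs only appends one HMAC round and leaves `k1` through `k4` unchanged. A minimal sketch of that single step (not the macro's literal expansion; the function name is illustrative):

	use bitcoin::hashes::{Hash, HashEngine};
	use bitcoin::hashes::hmac::{Hmac, HmacEngine};
	use bitcoin::hashes::sha256::Sha256;

	// Derives the next 32-byte key in the chain, e.g. counter 5 yields `k5` from `k4`.
	fn expand_next_key(prk: &[u8; 32], prev_key: &[u8; 32], counter: u8) -> [u8; 32] {
		let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
		hmac.input(prev_key);
		hmac.input(&[counter; 1]);
		Hmac::from_engine(hmac).into_inner()
	}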
diff --git a/lightning/src/util/enforcing_trait_impls.rs b/lightning/src/util/enforcing_trait_impls.rs
deleted file mode 100644 (file)
index df0f13b..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
-use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
-use crate::ln::{chan_utils, msgs, PaymentPreimage};
-use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
-
-use crate::prelude::*;
-use core::cmp;
-use crate::sync::{Mutex, Arc};
-#[cfg(test)] use crate::sync::MutexGuard;
-
-use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
-use bitcoin::util::sighash;
-
-use bitcoin::secp256k1;
-use bitcoin::secp256k1::{SecretKey, PublicKey};
-use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
-use crate::events::bump_transaction::HTLCDescriptor;
-use crate::util::ser::{Writeable, Writer};
-use crate::io::Error;
-use crate::ln::features::ChannelTypeFeatures;
-
-/// Initial value for revoked commitment downward counter
-pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
-
-/// An implementation of Sign that enforces some policy checks.  The current checks
-/// are an incomplete set.  They include:
-///
-/// - When signing, the holder transaction has not been revoked
-/// - When revoking, the holder transaction has not been signed
-/// - The holder commitment number is monotonic and without gaps
-/// - The revoked holder commitment number is monotonic and without gaps
-/// - There is at least one unrevoked holder transaction at all times
-/// - The counterparty commitment number is monotonic and without gaps
-/// - The pre-derived keys and pre-built transaction in CommitmentTransaction were correctly built
-///
-/// Eventually we will probably want to expose a variant of this which would essentially
-/// be what you'd want to run on a hardware wallet.
-///
-/// Note that counterparty signatures on the holder transaction are not checked, but it should
-/// be in a complete implementation.
-///
-/// Note that before we do so we should ensure its serialization format has backwards- and
-/// forwards-compatibility prefix/suffixes!
-#[derive(Clone)]
-pub struct EnforcingSigner {
-       pub inner: InMemorySigner,
-       /// Channel state used for policy enforcement
-       pub state: Arc<Mutex<EnforcementState>>,
-       pub disable_revocation_policy_check: bool,
-}
-
-impl PartialEq for EnforcingSigner {
-       fn eq(&self, o: &Self) -> bool {
-               Arc::ptr_eq(&self.state, &o.state)
-       }
-}
-
-impl EnforcingSigner {
-       /// Construct an EnforcingSigner
-       pub fn new(inner: InMemorySigner) -> Self {
-               let state = Arc::new(Mutex::new(EnforcementState::new()));
-               Self {
-                       inner,
-                       state,
-                       disable_revocation_policy_check: false
-               }
-       }
-
-       /// Construct an EnforcingSigner with externally managed storage
-       ///
-       /// Since there are multiple copies of this struct for each channel, some coordination is needed
-       /// so that all copies are aware of enforcement state.  A pointer to this state is provided
-       /// here, usually by an implementation of KeysInterface.
-       pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
-               Self {
-                       inner,
-                       state,
-                       disable_revocation_policy_check
-               }
-       }
-
-       pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features() }
-
-       #[cfg(test)]
-       pub fn get_enforcement_state(&self) -> MutexGuard<EnforcementState> {
-               self.state.lock().unwrap()
-       }
-}
-
-impl ChannelSigner for EnforcingSigner {
-       fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
-               self.inner.get_per_commitment_point(idx, secp_ctx)
-       }
-
-       fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
-               {
-                       let mut state = self.state.lock().unwrap();
-                       assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
-                       assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
-                       state.last_holder_revoked_commitment = idx;
-               }
-               self.inner.release_commitment_secret(idx)
-       }
-
-       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
-               let mut state = self.state.lock().unwrap();
-               let idx = holder_tx.commitment_number();
-               assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
-               state.last_holder_commitment = idx;
-               Ok(())
-       }
-
-       fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
-
-       fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
-
-       fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
-               self.inner.provide_channel_parameters(channel_parameters)
-       }
-}
-
-impl EcdsaChannelSigner for EnforcingSigner {
-       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
-
-               {
-                       let mut state = self.state.lock().unwrap();
-                       let actual_commitment_number = commitment_tx.commitment_number();
-                       let last_commitment_number = state.last_counterparty_commitment;
-                       // These commitment numbers are backwards counting.  We expect either the same as the previously encountered,
-                       // or the next one.
-                       assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
-                       // Ensure that the counterparty doesn't get more than two broadcastable commitments -
-                       // the last and the one we are trying to sign
-                       assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
-                       state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
-               }
-
-               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
-       }
-
-       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               let mut state = self.state.lock().unwrap();
-               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
-               state.last_counterparty_revoked_commitment = idx;
-               Ok(())
-       }
-
-       fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
-               let commitment_txid = trusted_tx.txid();
-               let holder_csv = self.inner.counterparty_selected_contest_delay();
-
-               let state = self.state.lock().unwrap();
-               let commitment_number = trusted_tx.commitment_number();
-               if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
-                       if !self.disable_revocation_policy_check {
-                               panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
-                                      state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
-                       }
-               }
-
-               for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
-                       assert!(this_htlc.transaction_output_index.is_some());
-                       let keys = trusted_tx.keys();
-                       let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.channel_type_features(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
-
-                       let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.channel_type_features(), &keys);
-
-                       let sighash_type = if self.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                               EcdsaSighashType::SinglePlusAnyoneCanPay
-                       } else {
-                               EcdsaSighashType::All
-                       };
-                       let sighash = hash_to_message!(
-                               &sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(
-                                       0, &htlc_redeemscript, this_htlc.amount_msat / 1000, sighash_type,
-                               ).unwrap()[..]
-                       );
-                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
-               }
-
-               Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
-       }
-
-       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-       fn unsafe_sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               Ok(self.inner.unsafe_sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
-       }
-
-       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
-       }
-
-       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
-       }
-
-       fn sign_holder_htlc_transaction(
-               &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
-               secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()> {
-               assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
-               assert_eq!(htlc_tx.output[input], htlc_descriptor.tx_output(secp_ctx));
-               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
-       }
-
-       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
-       }
-
-       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               closing_tx.verify(self.inner.funding_outpoint().into_bitcoin_outpoint())
-                       .expect("derived different closing transaction");
-               Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
-       }
-
-       fn sign_holder_anchor_input(
-               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
-       ) -> Result<Signature, ()> {
-               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
-               // As long as our minimum dust limit is enforced and is greater than our anchor output
-               // value, an anchor output can only have an index within [0, 1].
-               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
-               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
-       }
-
-       fn sign_channel_announcement_with_funding_key(
-               &self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()> {
-               self.inner.sign_channel_announcement_with_funding_key(msg, secp_ctx)
-       }
-}
-
-impl WriteableEcdsaChannelSigner for EnforcingSigner {}
-
-impl Writeable for EnforcingSigner {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
-               // EnforcingSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
-               // ([`EnforcementState`]). `inner` is serialized here and deserialized by
-               // [`SignerProvider::read_chan_signer`]. `state` is managed by [`SignerProvider`]
-               // and will be serialized as needed by the implementation of that trait.
-               self.inner.write(writer)?;
-               Ok(())
-       }
-}
-
-impl EnforcingSigner {
-       fn verify_counterparty_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
-               commitment_tx.verify(&self.inner.get_channel_parameters().as_counterparty_broadcastable(),
-                                    self.inner.counterparty_pubkeys(), self.inner.pubkeys(), secp_ctx)
-                       .expect("derived different per-tx keys or built transaction")
-       }
-
-       fn verify_holder_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
-               commitment_tx.verify(&self.inner.get_channel_parameters().as_holder_broadcastable(),
-                                    self.inner.pubkeys(), self.inner.counterparty_pubkeys(), secp_ctx)
-                       .expect("derived different per-tx keys or built transaction")
-       }
-}
-
-/// The state used by [`EnforcingSigner`] in order to enforce policy checks
-///
-/// This structure is maintained by KeysInterface since we may have multiple copies of
-/// the signer and they must coordinate their state.
-#[derive(Clone)]
-pub struct EnforcementState {
-       /// The last counterparty commitment number we signed, backwards counting
-       pub last_counterparty_commitment: u64,
-       /// The last counterparty commitment they revoked, backwards counting
-       pub last_counterparty_revoked_commitment: u64,
-       /// The last holder commitment number we revoked, backwards counting
-       pub last_holder_revoked_commitment: u64,
-       /// The last validated holder commitment number, backwards counting
-       pub last_holder_commitment: u64,
-}
-
-impl EnforcementState {
-       /// Enforcement state for a new channel
-       pub fn new() -> Self {
-               EnforcementState {
-                       last_counterparty_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-               }
-       }
-}
index e79980370342ff01920fe5a37b4423719a228ac7..4836b4d6814f705cb111515a983733c4566e3a34 100644 (file)
@@ -15,7 +15,6 @@ use bitcoin::blockdata::transaction::Transaction;
 
 use crate::routing::router::Route;
 use crate::ln::chan_utils::HTLCClaim;
-use crate::util::logger::DebugBytes;
 
 macro_rules! log_iter {
        ($obj: expr) => {
@@ -42,10 +41,7 @@ macro_rules! log_bytes {
 pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16);
 impl<'a> core::fmt::Display for DebugFundingChannelId<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               for i in (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().iter() {
-                       write!(f, "{:02x}", i)?;
-               }
-               Ok(())
+               (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f)
        }
 }
 macro_rules! log_funding_channel_id {
@@ -57,7 +53,7 @@ macro_rules! log_funding_channel_id {
 pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T));
 impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               DebugBytes(&(self.0).0.to_channel_id()[..]).fmt(f)
+               (self.0).0.to_channel_id().fmt(f)
        }
 }
 macro_rules! log_funding_info {
index aa1fdfcee156216f6edcc79b3563a6b2f3f4e5e7..477cbcbdd9aa93493de6703e0d4d57cd81c135d2 100644 (file)
@@ -21,7 +21,7 @@
 //! <https://api.lightning.community/#signmessage>
 
 use crate::prelude::*;
-use crate::util::zbase32;
+use crate::util::base32;
 use bitcoin::hashes::{sha256d, Hash};
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId};
 use bitcoin::secp256k1::{Error, Message, PublicKey, Secp256k1, SecretKey};
@@ -29,118 +29,119 @@ use bitcoin::secp256k1::{Error, Message, PublicKey, Secp256k1, SecretKey};
 static LN_MESSAGE_PREFIX: &[u8] = b"Lightning Signed Message:";
 
 fn sigrec_encode(sig_rec: RecoverableSignature) -> Vec<u8> {
-    let (rid, rsig) = sig_rec.serialize_compact();
-    let prefix = rid.to_i32() as u8 + 31;
+       let (rid, rsig) = sig_rec.serialize_compact();
+       let prefix = rid.to_i32() as u8 + 31;
 
-    [&[prefix], &rsig[..]].concat()
+       [&[prefix], &rsig[..]].concat()
 }
 
 fn sigrec_decode(sig_rec: Vec<u8>) -> Result<RecoverableSignature, Error> {
-    // Signature must be 64 + 1 bytes long (compact signature + recovery id)
-    if sig_rec.len() != 65 {
-        return Err(Error::InvalidSignature);
-    }
-
-    let rsig = &sig_rec[1..];
-    let rid = sig_rec[0] as i32 - 31;
-
-    match RecoveryId::from_i32(rid) {
-        Ok(x) => RecoverableSignature::from_compact(rsig, x),
-        Err(e) => Err(e)
-    }
+       // Signature must be 64 + 1 bytes long (compact signature + recovery id)
+       if sig_rec.len() != 65 {
+               return Err(Error::InvalidSignature);
+       }
+
+       let rsig = &sig_rec[1..];
+       let rid = sig_rec[0] as i32 - 31;
+
+       match RecoveryId::from_i32(rid) {
+               Ok(x) => RecoverableSignature::from_compact(rsig, x),
+               Err(e) => Err(e)
+       }
 }
 
 /// Creates a digital signature of a message given a SecretKey, like the node's secret.
 /// A receiver knowing the PublicKey (e.g. the node's id) and the message can be sure that the signature was generated by the caller.
 /// Signatures are EC recoverable, meaning that given the message and the signature the PublicKey of the signer can be extracted.
 pub fn sign(msg: &[u8], sk: &SecretKey) -> Result<String, Error> {
-    let secp_ctx = Secp256k1::signing_only();
-    let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
+       let secp_ctx = Secp256k1::signing_only();
+       let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
 
-    let sig = secp_ctx.sign_ecdsa_recoverable(&Message::from_slice(&msg_hash)?, sk);
-    Ok(zbase32::encode(&sigrec_encode(sig)))
+       let sig = secp_ctx.sign_ecdsa_recoverable(&Message::from_slice(&msg_hash)?, sk);
+       Ok(base32::Alphabet::ZBase32.encode(&sigrec_encode(sig)))
 }
 
 /// Recovers the PublicKey of the signer of the message given the message and the signature.
 pub fn recover_pk(msg: &[u8], sig: &str) ->  Result<PublicKey, Error> {
-    let secp_ctx = Secp256k1::verification_only();
-    let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
-
-    match zbase32::decode(&sig) {
-        Ok(sig_rec) => {
-            match sigrec_decode(sig_rec) {
-                Ok(sig) => secp_ctx.recover_ecdsa(&Message::from_slice(&msg_hash)?, &sig),
-                Err(e) => Err(e)
-            }
-        },
-        Err(_) => Err(Error::InvalidSignature)
-    }
+       let secp_ctx = Secp256k1::verification_only();
+       let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
+
+       match base32::Alphabet::ZBase32.decode(&sig) {
+               Ok(sig_rec) => {
+                       match sigrec_decode(sig_rec) {
+                               Ok(sig) => secp_ctx.recover_ecdsa(&Message::from_slice(&msg_hash)?, &sig),
+                               Err(e) => Err(e)
+                       }
+               },
+               Err(_) => Err(Error::InvalidSignature)
+       }
 }
 
 /// Verifies a message was signed by a PrivateKey that derives to a given PublicKey, given a message, a signature,
 /// and the PublicKey.
 pub fn verify(msg: &[u8], sig: &str, pk: &PublicKey) -> bool {
-    match recover_pk(msg, sig) {
-        Ok(x) => x == *pk,
-        Err(_) => false
-    }
+       match recover_pk(msg, sig) {
+               Ok(x) => x == *pk,
+               Err(_) => false
+       }
 }
 
 #[cfg(test)]
 mod test {
-    use core::str::FromStr;
-    use crate::util::message_signing::{sign, recover_pk, verify};
-    use bitcoin::secp256k1::ONE_KEY;
-    use bitcoin::secp256k1::{PublicKey, Secp256k1};
-
-    #[test]
-    fn test_sign() {
-        let message = "test message";
-        let zbase32_sig = sign(message.as_bytes(), &ONE_KEY);
-
-        assert_eq!(zbase32_sig.unwrap(), "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e")
-    }
-
-    #[test]
-    fn test_recover_pk() {
-        let message = "test message";
-        let sig = "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e";
-        let pk = recover_pk(message.as_bytes(), sig);
-
-        assert_eq!(pk.unwrap(), PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY))
-    }
-
-    #[test]
-    fn test_verify() {
-        let message = "another message";
-        let sig = sign(message.as_bytes(), &ONE_KEY).unwrap();
-        let pk = PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY);
-
-        assert!(verify(message.as_bytes(), &sig, &pk))
-    }
-
-    #[test]
-    fn test_verify_ground_truth_ish() {
-        // There are no standard tests vectors for Sign/Verify, using the same tests vectors as c-lightning to see if they are compatible.
-        // Taken from https://github.com/ElementsProject/lightning/blob/1275af6fbb02460c8eb2f00990bb0ef9179ce8f3/tests/test_misc.py#L1925-L1938
-
-        let corpus = [
-            ["@bitconner",
-             "is this compatible?",
-             "rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t",
-             "02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5"],
-            ["@duck1123",
-             "hi",
-             "rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg",
-             "02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b"],
-            ["@jochemin",
-             "hi",
-             "ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3",
-             "022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931"],
-        ];
-
-        for c in &corpus {
-            assert!(verify(c[1].as_bytes(), c[2], &PublicKey::from_str(c[3]).unwrap()))
-        }
-    }
+       use core::str::FromStr;
+       use crate::util::message_signing::{sign, recover_pk, verify};
+       use bitcoin::secp256k1::ONE_KEY;
+       use bitcoin::secp256k1::{PublicKey, Secp256k1};
+
+       #[test]
+       fn test_sign() {
+               let message = "test message";
+               let zbase32_sig = sign(message.as_bytes(), &ONE_KEY);
+
+               assert_eq!(zbase32_sig.unwrap(), "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e")
+       }
+
+       #[test]
+       fn test_recover_pk() {
+               let message = "test message";
+               let sig = "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e";
+               let pk = recover_pk(message.as_bytes(), sig);
+
+               assert_eq!(pk.unwrap(), PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY))
+       }
+
+       #[test]
+       fn test_verify() {
+               let message = "another message";
+               let sig = sign(message.as_bytes(), &ONE_KEY).unwrap();
+               let pk = PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY);
+
+               assert!(verify(message.as_bytes(), &sig, &pk))
+       }
+
+       #[test]
+       fn test_verify_ground_truth_ish() {
+               // There are no standard tests vectors for Sign/Verify, using the same tests vectors as c-lightning to see if they are compatible.
+               // Taken from https://github.com/ElementsProject/lightning/blob/1275af6fbb02460c8eb2f00990bb0ef9179ce8f3/tests/test_misc.py#L1925-L1938
+
+               let corpus = [
+                       ["@bitconner",
+                       "is this compatible?",
+                       "rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t",
+                       "02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5"],
+                       ["@duck1123",
+                       "hi",
+                       "rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg",
+                       "02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b"],
+                       ["@jochemin",
+                       "hi",
+                       "ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3",
+                       "022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931"],
+               ];
+
+               for c in &corpus {
+                       assert!(verify(c[1].as_bytes(), c[2], &PublicKey::from_str(c[3]).unwrap()))
+               }
+       }
 }
+
index dd9d2744e1c7927ba82bab9244a355020b5d9660..e86885a83dbd0160b42bc103ecb5e737ea39bb7b 100644 (file)
@@ -22,14 +22,14 @@ pub mod invoice;
 pub mod persist;
 pub mod string;
 pub mod wakers;
+#[cfg(fuzzing)]
+pub mod base32;
+#[cfg(not(fuzzing))]
+pub(crate) mod base32;
 
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
-#[cfg(fuzzing)]
-pub mod zbase32;
-#[cfg(not(fuzzing))]
-pub(crate) mod zbase32;
 #[cfg(not(fuzzing))]
 pub(crate) mod poly1305;
 pub(crate) mod chacha20poly1305rfc;
@@ -56,5 +56,5 @@ pub mod test_utils;
 /// impls of traits that add extra enforcement on the way they're called. Useful for detecting state
 /// machine errors and used in fuzz targets and tests.
 #[cfg(any(test, feature = "_test_utils"))]
-pub mod enforcing_trait_impls;
+pub mod test_channel_signer;
 
index 435ef30d33198609b8cd38d110b1c1f9e70f7ddf..372a094a931bed6dcca159e4bab310cdd7863a4b 100644 (file)
@@ -4,13 +4,16 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! This module contains a simple key-value store trait KVStorePersister that
+//! This module contains a simple key-value store trait [`KVStore`] that
 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
 //! and [`ChannelMonitor`] all in one place.
 
 use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
+use bitcoin::hashes::hex::{FromHex, ToHex};
+use bitcoin::{BlockHash, Txid};
+
 use crate::io;
+use crate::prelude::{Vec, String};
 use crate::routing::scoring::WriteableScore;
 
 use crate::chain;
@@ -22,15 +25,93 @@ use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
 use crate::ln::channelmanager::ChannelManager;
 use crate::routing::router::Router;
 use crate::routing::gossip::NetworkGraph;
-use super::{logger::Logger, ser::Writeable};
-
-/// Trait for a key-value store for persisting some writeable object at some key
-/// Implementing `KVStorePersister` provides auto-implementations for [`Persister`]
-/// and [`Persist`] traits.  It uses "manager", "network_graph",
-/// and "monitors/{funding_txo_id}_{funding_txo_index}" for keys.
-pub trait KVStorePersister {
-       /// Persist the given writeable using the provided key
-       fn persist<W: Writeable>(&self, key: &str, object: &W) -> io::Result<()>;
+use crate::util::logger::Logger;
+use crate::util::ser::{ReadableArgs, Writeable};
+
+/// The alphabet of characters allowed for namespaces and keys.
+pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
+
+/// The maximum number of characters namespaces and keys may have.
+pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
+
+/// The namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
+
+/// The namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
+/// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
+
+/// The namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
+
+/// The namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
+
+/// Provides an interface that allows storage and retrieval of persisted values that are associated
+/// with given keys.
+///
+/// In order to avoid collisions the key space is segmented based on the given `namespace`s and
+/// `sub_namespace`s. Implementations of this trait are free to handle them in different ways, as
+/// long as per-namespace key uniqueness is asserted.
+///
+/// Keys and namespaces are required to be valid ASCII strings in the range of
+/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
+/// namespaces and sub-namespaces (`""`) are assumed to be a valid, however, if `namespace` is
+/// empty, `sub_namespace` is required to be empty, too. This means that concerns should always be
+/// separated by namespace first, before sub-namespaces are used. While the number of namespaces
+/// will be relatively small and is determined at compile time, there may be many sub-namespaces
+/// per namespace. Note that per-namespace uniqueness needs to also hold for keys *and*
+/// namespaces/sub-namespaces in any given namespace/sub-namespace, i.e., conflicts between keys
+/// and equally named namespaces/sub-namespaces must be avoided.
+///
+/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
+/// interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the
+/// data model previously assumed by `KVStorePersister::persist`.
+pub trait KVStore {
+       /// Returns the data stored for the given `namespace`, `sub_namespace`, and `key`.
+       ///
+       /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
+       /// `namespace` and `sub_namespace`.
+       ///
+       /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
+       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>>;
+       /// Persists the given data under the given `key`.
+       ///
+       /// Will create the given `namespace` and `sub_namespace` if not already present in the store.
+       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()>;
+       /// Removes any data that had previously been persisted under the given `key`.
+       ///
+       /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
+       /// remove the given `key` at some point in time after the method returns, e.g., as part of an
+       /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
+       /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
+       ///
+       /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
+       /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
+       /// potentially get lost on crash after the method returns. Therefore, this flag should only be
+       /// set for `remove` operations that can be safely replayed at a later time.
+       ///
+       /// Returns successfully if no data will be stored for the given `namespace`, `sub_namespace`, and
+       /// `key`, independently of whether it was present before its invocation or not.
+       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()>;
+       /// Returns a list of keys that are stored under the given `sub_namespace` in `namespace`.
+       ///
+       /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
+       /// returned keys. Returns an empty list if `namespace` or `sub_namespace` is unknown.
+       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>>;
 }
 
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
@@ -54,7 +135,8 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
 }
 
-impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
+
+impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
        where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
@@ -64,41 +146,113 @@ impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Dere
                R::Target: 'static + Router,
                L::Target: 'static + Logger,
 {
-       /// Persist the given ['ChannelManager'] to disk with the name "manager", returning an error if persistence failed.
+       /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
-               self.persist("manager", channel_manager)
+               self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
+                                  CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
+                                  CHANNEL_MANAGER_PERSISTENCE_KEY,
+                                  &channel_manager.encode())
        }
 
-       /// Persist the given [`NetworkGraph`] to disk with the name "network_graph", returning an error if persistence failed.
+       /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
-               self.persist("network_graph", network_graph)
+               self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
+                                  NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
+                                  NETWORK_GRAPH_PERSISTENCE_KEY,
+                                  &network_graph.encode())
        }
 
-       /// Persist the given [`WriteableScore`] to disk with name "scorer", returning an error if persistence failed.
+       /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
-               self.persist("scorer", &scorer)
+               self.write(SCORER_PERSISTENCE_NAMESPACE,
+                                  SCORER_PERSISTENCE_SUB_NAMESPACE,
+                                  SCORER_PERSISTENCE_KEY,
+                                  &scorer.encode())
        }
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStorePersister> Persist<ChannelSigner> for K {
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
        // TODO: We really need a way for the persister to inform the user that its time to crash/shut
        // down once these start returning failure.
        // A PermanentFailure implies we should probably just shut down the node since we're
        // force-closing channels without even broadcasting!
 
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               match self.persist(&key, monitor) {
+               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               match self.write(
+                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       &key, &monitor.encode())
+               {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
                        Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
                }
        }
 
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               match self.persist(&key, monitor) {
+               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               match self.write(
+                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       &key, &monitor.encode())
+               {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
                        Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
                }
        }
 }
+
+/// Read previously persisted [`ChannelMonitor`]s from the store.
+pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
+       kv_store: K, entropy_source: ES, signer_provider: SP,
+) -> io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
+where
+       K::Target: KVStore,
+       ES::Target: EntropySource + Sized,
+       SP::Target: SignerProvider + Sized,
+{
+       let mut res = Vec::new();
+
+       for stored_key in kv_store.list(
+               CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
+       {
+               if stored_key.len() < 66 {
+                       return Err(io::Error::new(
+                               io::ErrorKind::InvalidData,
+                               "Stored key has invalid length"));
+               }
+
+               let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
+                       io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
+               })?;
+
+               let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
+                       io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
+               })?;
+
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+                       &mut io::Cursor::new(
+                               kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
+                       (&*entropy_source, &*signer_provider),
+               ) {
+                       Ok((block_hash, channel_monitor)) => {
+                               if channel_monitor.get_funding_txo().0.txid != txid
+                                       || channel_monitor.get_funding_txo().0.index != index
+                               {
+                                       return Err(io::Error::new(
+                                               io::ErrorKind::InvalidData,
+                                               "ChannelMonitor was stored under the wrong key",
+                                       ));
+                               }
+                               res.push((block_hash, channel_monitor));
+                       }
+                       Err(_) => {
+                               return Err(io::Error::new(
+                                       io::ErrorKind::InvalidData,
+                                       "Failed to deserialize ChannelMonitor"
+                               ))
+                       }
+               }
+       }
+       Ok(res)
+}
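
For reviewers, a minimal caller-side sketch of the new helper (not code from this PR): the generic plumbing mirrors the signature of `read_channel_monitors` above, while the `lightning::util::persist` / `lightning::sign` import paths and the `load_monitors` name are assumptions for illustration only.

```rust
use std::ops::Deref;
use lightning::sign::{EntropySource, SignerProvider};
use lightning::util::persist::{read_channel_monitors, KVStore};

// Load every persisted ChannelMonitor from a KVStore, e.g. before constructing the
// ChannelManager on startup. `kv_store` is any KVStore implementation; the entropy
// source and signer provider are typically both a KeysManager-like type.
fn load_monitors<K: Deref, ES: Deref, SP: Deref>(
	kv_store: K, entropy_source: ES, signer_provider: SP,
) -> std::io::Result<()>
where
	K::Target: KVStore,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	// Monitors are stored under "{funding_txid}_{funding_output_index}" within the
	// channel monitor namespace; read_channel_monitors re-checks each key against the
	// monitor's funding outpoint and returns InvalidData on any mismatch, so every
	// entry reaching this loop is internally consistent.
	for (block_hash, monitor) in read_channel_monitors(kv_store, entropy_source, signer_provider)? {
		let _ = (block_hash, monitor.get_funding_txo().0);
	}
	Ok(())
}
```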
diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs
new file mode 100644 (file)
index 0000000..2fb1c49
--- /dev/null
@@ -0,0 +1,297 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
+use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
+use crate::ln::{chan_utils, msgs, PaymentPreimage};
+use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
+
+use crate::prelude::*;
+use core::cmp;
+use crate::sync::{Mutex, Arc};
+#[cfg(test)] use crate::sync::MutexGuard;
+
+use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
+use bitcoin::util::sighash;
+
+use bitcoin::secp256k1;
+use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+use crate::events::bump_transaction::HTLCDescriptor;
+use crate::util::ser::{Writeable, Writer};
+use crate::io::Error;
+use crate::ln::features::ChannelTypeFeatures;
+
+/// Initial value for revoked commitment downward counter
+pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
+
+/// An implementation of [`ChannelSigner`] that enforces some policy checks.  The current checks
+/// are an incomplete set.  They include:
+///
+/// - When signing, the holder transaction has not been revoked
+/// - When revoking, the holder transaction has not been signed
+/// - The holder commitment number is monotonic and without gaps
+/// - The revoked holder commitment number is monotonic and without gaps
+/// - There is at least one unrevoked holder transaction at all times
+/// - The counterparty commitment number is monotonic and without gaps
+/// - The pre-derived keys and pre-built transaction in CommitmentTransaction were correctly built
+///
+/// Eventually we will probably want to expose a variant of this which would essentially
+/// be what you'd want to run on a hardware wallet.
+///
+/// Note that counterparty signatures on the holder transaction are not checked, but they should
+/// be in a complete implementation.
+///
+/// Note that before we do so we should ensure its serialization format has backwards- and
+/// forwards-compatibility prefix/suffixes!
+#[derive(Clone)]
+pub struct TestChannelSigner {
+       pub inner: InMemorySigner,
+       /// Channel state used for policy enforcement
+       pub state: Arc<Mutex<EnforcementState>>,
+       pub disable_revocation_policy_check: bool,
+}
+
+impl PartialEq for TestChannelSigner {
+       fn eq(&self, o: &Self) -> bool {
+               Arc::ptr_eq(&self.state, &o.state)
+       }
+}
+
+impl TestChannelSigner {
+       /// Construct a TestChannelSigner
+       pub fn new(inner: InMemorySigner) -> Self {
+               let state = Arc::new(Mutex::new(EnforcementState::new()));
+               Self {
+                       inner,
+                       state,
+                       disable_revocation_policy_check: false
+               }
+       }
+
+       /// Construct a TestChannelSigner with externally managed storage
+       ///
+       /// Since there are multiple copies of this struct for each channel, some coordination is needed
+       /// so that all copies are aware of enforcement state.  A pointer to this state is provided
+       /// here, usually by an implementation of KeysInterface.
+       pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
+               Self {
+                       inner,
+                       state,
+                       disable_revocation_policy_check
+               }
+       }
+
+       pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features() }
+
+       #[cfg(test)]
+       pub fn get_enforcement_state(&self) -> MutexGuard<EnforcementState> {
+               self.state.lock().unwrap()
+       }
+}
+
+impl ChannelSigner for TestChannelSigner {
+       fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
+               self.inner.get_per_commitment_point(idx, secp_ctx)
+       }
+
+       fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
+               {
+                       let mut state = self.state.lock().unwrap();
+                       assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
+                       assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
+                       state.last_holder_revoked_commitment = idx;
+               }
+               self.inner.release_commitment_secret(idx)
+       }
+
+       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+               let mut state = self.state.lock().unwrap();
+               let idx = holder_tx.commitment_number();
+               assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
+               state.last_holder_commitment = idx;
+               Ok(())
+       }
+
+       fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
+
+       fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
+
+       fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
+               self.inner.provide_channel_parameters(channel_parameters)
+       }
+}
+
+impl EcdsaChannelSigner for TestChannelSigner {
+       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
+
+               {
+                       let mut state = self.state.lock().unwrap();
+                       let actual_commitment_number = commitment_tx.commitment_number();
+                       let last_commitment_number = state.last_counterparty_commitment;
+                       // These commitment numbers count backwards.  We expect either the same number as previously
+                       // encountered, or the next one.
+                       assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
+                       // Ensure that the counterparty doesn't get more than two broadcastable commitments -
+                       // the last and the one we are trying to sign
+                       assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
+                       state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
+               }
+
+               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
+       }
+
+       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
+               let mut state = self.state.lock().unwrap();
+               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+               state.last_counterparty_revoked_commitment = idx;
+               Ok(())
+       }
+
+       fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
+               let commitment_txid = trusted_tx.txid();
+               let holder_csv = self.inner.counterparty_selected_contest_delay();
+
+               let state = self.state.lock().unwrap();
+               let commitment_number = trusted_tx.commitment_number();
+               if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
+                       if !self.disable_revocation_policy_check {
+                               panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
+                                      state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
+                       }
+               }
+
+               for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
+                       assert!(this_htlc.transaction_output_index.is_some());
+                       let keys = trusted_tx.keys();
+                       let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.channel_type_features(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+                       let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.channel_type_features(), &keys);
+
+                       let sighash_type = if self.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+                               EcdsaSighashType::SinglePlusAnyoneCanPay
+                       } else {
+                               EcdsaSighashType::All
+                       };
+                       let sighash = hash_to_message!(
+                               &sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(
+                                       0, &htlc_redeemscript, this_htlc.amount_msat / 1000, sighash_type,
+                               ).unwrap()[..]
+                       );
+                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
+               }
+
+               Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
+       }
+
+       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+       fn unsafe_sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               Ok(self.inner.unsafe_sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
+       }
+
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
+       }
+
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
+       }
+
+       fn sign_holder_htlc_transaction(
+               &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
+               secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()> {
+               assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
+               assert_eq!(htlc_tx.output[input], htlc_descriptor.tx_output(secp_ctx));
+               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
+       }
+
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
+       }
+
+       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               closing_tx.verify(self.inner.funding_outpoint().into_bitcoin_outpoint())
+                       .expect("derived different closing transaction");
+               Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
+       }
+
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()> {
+               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
+               // As long as our minimum dust limit is enforced and is greater than our anchor output
+               // value, an anchor output can only have an index within [0, 1].
+               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
+               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
+       }
+
+       fn sign_channel_announcement_with_funding_key(
+               &self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()> {
+               self.inner.sign_channel_announcement_with_funding_key(msg, secp_ctx)
+       }
+}
+
+impl WriteableEcdsaChannelSigner for TestChannelSigner {}
+
+impl Writeable for TestChannelSigner {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+               // TestChannelSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
+               // ([`EnforcementState`]). `inner` is serialized here and deserialized by
+               // [`SignerProvider::read_chan_signer`]. `state` is managed by [`SignerProvider`]
+               // and will be serialized as needed by the implementation of that trait.
+               self.inner.write(writer)?;
+               Ok(())
+       }
+}
+
+impl TestChannelSigner {
+       fn verify_counterparty_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
+               commitment_tx.verify(&self.inner.get_channel_parameters().as_counterparty_broadcastable(),
+                                    self.inner.counterparty_pubkeys(), self.inner.pubkeys(), secp_ctx)
+                       .expect("derived different per-tx keys or built transaction")
+       }
+
+       fn verify_holder_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
+               commitment_tx.verify(&self.inner.get_channel_parameters().as_holder_broadcastable(),
+                                    self.inner.pubkeys(), self.inner.counterparty_pubkeys(), secp_ctx)
+                       .expect("derived different per-tx keys or built transaction")
+       }
+}
+
+/// The state used by [`TestChannelSigner`] in order to enforce policy checks
+///
+/// This structure is maintained by KeysInterface since we may have multiple copies of
+/// the signer and they must coordinate their state.
+#[derive(Clone)]
+pub struct EnforcementState {
+       /// The last counterparty commitment number we signed, backwards counting
+       pub last_counterparty_commitment: u64,
+       /// The last counterparty commitment they revoked, backwards counting
+       pub last_counterparty_revoked_commitment: u64,
+       /// The last holder commitment number we revoked, backwards counting
+       pub last_holder_revoked_commitment: u64,
+       /// The last validated holder commitment number, backwards counting
+       pub last_holder_commitment: u64,
+}
+
+impl EnforcementState {
+       /// Enforcement state for a new channel
+       pub fn new() -> Self {
+               EnforcementState {
+                       last_counterparty_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+               }
+       }
+}
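
Since the docs above lean on the shared `EnforcementState`, here is a hedged sketch of how the `new_with_revoked` constructor is intended to be wired up. The `lightning::util::test_channel_signer` import assumes the module is visible to the caller (in-tree it is only built for tests/test utilities), and `make_signer_pair` is a name invented for illustration.

```rust
use std::sync::{Arc, Mutex};
use lightning::sign::InMemorySigner;
use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner};

// Two copies of the signer for the same channel share one EnforcementState, so a
// commitment validated (or revoked) through one copy is visible to the policy
// checks performed through the other.
fn make_signer_pair(inner: InMemorySigner) -> (TestChannelSigner, TestChannelSigner) {
	let state = Arc::new(Mutex::new(EnforcementState::new()));
	let first = TestChannelSigner::new_with_revoked(inner.clone(), Arc::clone(&state), false);
	let second = TestChannelSigner::new_with_revoked(inner, state, false);
	(first, second)
}
```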
index bcd460ee1b11e1343add61ccee1bfb9c713726a1..785b40befd472ba10b30fb618bd75eb9ab20c15a 100644 (file)
@@ -20,6 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::sign;
 use crate::events;
 use crate::events::bump_transaction::{WalletSource, Utxo};
+use crate::ln::ChannelId;
 use crate::ln::channelmanager;
 use crate::ln::chan_utils::CommitmentTransaction;
 use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
@@ -31,11 +32,13 @@ use crate::offers::invoice_request::UnsignedInvoiceRequest;
 use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
 use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
 use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
-use crate::routing::scoring::{ChannelUsage, Score};
+use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
+use crate::sync::RwLock;
 use crate::util::config::UserConfig;
-use crate::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use crate::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use crate::util::logger::{Logger, Level, Record};
 use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
+use crate::util::persist::KVStore;
 
 use bitcoin::EcdsaSighashType;
 use bitcoin::blockdata::constants::ChainHash;
@@ -59,7 +62,6 @@ use regex;
 use crate::io;
 use crate::prelude::*;
 use core::cell::RefCell;
-use core::ops::DerefMut;
 use core::time::Duration;
 use crate::sync::{Mutex, Arc};
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@@ -100,11 +102,11 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
 pub struct TestRouter<'a> {
        pub network_graph: Arc<NetworkGraph<&'a TestLogger>>,
        pub next_routes: Mutex<VecDeque<(RouteParameters, Result<Route, LightningError>)>>,
-       pub scorer: &'a Mutex<TestScorer>,
+       pub scorer: &'a RwLock<TestScorer>,
 }
 
 impl<'a> TestRouter<'a> {
-       pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a Mutex<TestScorer>) -> Self {
+       pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a RwLock<TestScorer>) -> Self {
                Self { network_graph, next_routes: Mutex::new(VecDeque::new()), scorer }
        }
 
@@ -122,8 +124,8 @@ impl<'a> Router for TestRouter<'a> {
                if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
                        assert_eq!(find_route_query, *params);
                        if let Ok(ref route) = find_route_res {
-                               let mut binding = self.scorer.lock().unwrap();
-                               let scorer = ScorerAccountingForInFlightHtlcs::new(binding.deref_mut(), &inflight_htlcs);
+                               let scorer = self.scorer.read().unwrap();
+                               let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
                                for path in &route.paths {
                                        let mut aggregate_msat = 0u64;
                                        for (idx, hop) in path.hops.iter().rev().enumerate() {
@@ -150,7 +152,7 @@ impl<'a> Router for TestRouter<'a> {
                let logger = TestLogger::new();
                find_route(
                        payer, params, &self.network_graph, first_hops, &logger,
-                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.lock().unwrap().deref_mut(), &inflight_htlcs), &(),
+                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(),
                        &[42; 32]
                )
        }
@@ -173,7 +175,7 @@ impl EntropySource for OnlyReadsKeysInterface {
        fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }}
 
 impl SignerProvider for OnlyReadsKeysInterface {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!(); }
 
@@ -183,7 +185,7 @@ impl SignerProvider for OnlyReadsKeysInterface {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        false
@@ -195,18 +197,18 @@ impl SignerProvider for OnlyReadsKeysInterface {
 }
 
 pub struct TestChainMonitor<'a> {
-       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingSigner>)>>,
-       pub monitor_updates: Mutex<HashMap<[u8; 32], Vec<channelmonitor::ChannelMonitorUpdate>>>,
-       pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64, MonitorUpdateId)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<EnforcingSigner>>,
+       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
+       pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
+       pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
        /// boolean.
-       pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
+       pub expect_channel_force_closed: Mutex<Option<(ChannelId, bool)>>,
 }
 impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        monitor_updates: Mutex::new(HashMap::new()),
@@ -217,18 +219,18 @@ impl<'a> TestChainMonitor<'a> {
                }
        }
 
-       pub fn complete_sole_pending_chan_update(&self, channel_id: &[u8; 32]) {
+       pub fn complete_sole_pending_chan_update(&self, channel_id: &ChannelId) {
                let (outpoint, _, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
                self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
        }
 }
-impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
+impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
@@ -262,7 +264,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap();
                w.0.clear();
                monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == *monitor);
                self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
@@ -294,6 +296,7 @@ pub(crate) struct WatchtowerPersister {
 }
 
 impl WatchtowerPersister {
+       #[cfg(test)]
        pub(crate) fn new(destination_script: Script) -> Self {
                WatchtowerPersister {
                        persister: TestPersister::new(),
@@ -303,6 +306,7 @@ impl WatchtowerPersister {
                }
        }
 
+       #[cfg(test)]
        pub(crate) fn justice_tx(&self, funding_txo: OutPoint, commitment_txid: &Txid)
        -> Option<Transaction> {
                self.watchtower_state.lock().unwrap().get(&funding_txo).unwrap().get(commitment_txid).cloned()
@@ -423,6 +427,97 @@ impl<Signer: sign::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> fo
        }
 }
 
+pub struct TestStore {
+       persisted_bytes: Mutex<HashMap<String, HashMap<String, Vec<u8>>>>,
+       read_only: bool,
+}
+
+impl TestStore {
+       pub fn new(read_only: bool) -> Self {
+               let persisted_bytes = Mutex::new(HashMap::new());
+               Self { persisted_bytes, read_only }
+       }
+}
+
+impl KVStore for TestStore {
+       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
+               let persisted_lock = self.persisted_bytes.lock().unwrap();
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+
+               if let Some(outer_ref) = persisted_lock.get(&prefixed) {
+                       if let Some(inner_ref) = outer_ref.get(key) {
+                               let bytes = inner_ref.clone();
+                               Ok(bytes)
+                       } else {
+                               Err(io::Error::new(io::ErrorKind::NotFound, "Key not found"))
+                       }
+               } else {
+                       Err(io::Error::new(io::ErrorKind::NotFound, "Namespace not found"))
+               }
+       }
+
+       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
+               if self.read_only {
+                       return Err(io::Error::new(
+                               io::ErrorKind::PermissionDenied,
+                               "Cannot modify read-only store",
+                       ));
+               }
+               let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+               let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+               let mut bytes = Vec::new();
+               bytes.write_all(buf)?;
+               outer_e.insert(key.to_string(), bytes);
+               Ok(())
+       }
+
+       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
+               if self.read_only {
+                       return Err(io::Error::new(
+                               io::ErrorKind::PermissionDenied,
+                               "Cannot modify read-only store",
+                       ));
+               }
+
+               let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+               if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
+                       outer_ref.remove(key);
+               }
+
+               Ok(())
+       }
+
+       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
+               let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+               match persisted_lock.entry(prefixed) {
+                       hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
+                       hash_map::Entry::Vacant(_) => Ok(Vec::new()),
+               }
+       }
+}
+
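
A hedged round-trip sketch against the in-memory `TestStore` added above (the `lightning::util::test_utils` import assumes that test-only module is visible to the caller). It illustrates the data model: entries are addressed by a `(namespace, sub_namespace, key)` triple, with the outer map keyed by `"namespace"` or `"namespace/sub_namespace"`.

```rust
use lightning::util::persist::KVStore;
use lightning::util::test_utils::TestStore;

fn kvstore_roundtrip() -> std::io::Result<()> {
	// `false` makes the store writable; `TestStore::new(true)` rejects writes and removes.
	let store = TestStore::new(false);

	store.write("monitors", "", "deadbeef_0", &[1u8, 2, 3])?;
	assert_eq!(store.read("monitors", "", "deadbeef_0")?, vec![1u8, 2, 3]);
	assert_eq!(store.list("monitors", "")?, vec!["deadbeef_0".to_string()]);

	store.remove("monitors", "", "deadbeef_0", false)?;
	assert!(store.read("monitors", "", "deadbeef_0").is_err());
	Ok(())
}
```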
 pub struct TestBroadcaster {
        pub txn_broadcasted: Mutex<Vec<Transaction>>,
        pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
@@ -977,16 +1072,16 @@ impl NodeSigner for TestKeysInterface {
 }
 
 impl SignerProvider for TestKeysInterface {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
                self.backing.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> EnforcingSigner {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
                let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
                let state = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+               TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
@@ -995,7 +1090,7 @@ impl SignerProvider for TestKeysInterface {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        self.disable_revocation_policy_check
@@ -1036,10 +1131,10 @@ impl TestKeysInterface {
                self
        }
 
-       pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> EnforcingSigner {
+       pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> TestChannelSigner {
                let keys = self.backing.derive_channel_keys(channel_value_satoshis, id);
                let state = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+               TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
        fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
@@ -1160,7 +1255,7 @@ impl crate::util::ser::Writeable for TestScorer {
        fn write<W: crate::util::ser::Writer>(&self, _: &mut W) -> Result<(), crate::io::Error> { unreachable!(); }
 }
 
-impl Score for TestScorer {
+impl ScoreLookUp for TestScorer {
        type ScoreParams = ();
        fn channel_penalty_msat(
                &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage, _score_params: &Self::ScoreParams
@@ -1176,7 +1271,9 @@ impl Score for TestScorer {
                }
                0
        }
+}
 
+impl ScoreUpdate for TestScorer {
        fn payment_path_failed(&mut self, _actual_path: &Path, _actual_short_channel_id: u64) {}
 
        fn payment_path_successful(&mut self, _actual_path: &Path) {}
diff --git a/lightning/src/util/zbase32.rs b/lightning/src/util/zbase32.rs
deleted file mode 100644 (file)
index 8a7bf35..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-// This is a modification of base32 encoding to support the zbase32 alphabet.
-// The original piece of software can be found at https://github.com/andreasots/base32
-// The original portions of this software are Copyright (c) 2015 The base32 Developers
-
-/* This file is licensed under either of
- *  Apache License, Version 2.0, (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
- *  MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT)
- * at your option.
-*/
-
-use crate::prelude::*;
-
-const ALPHABET: &'static [u8] = b"ybndrfg8ejkmcpqxot1uwisza345h769";
-
-/// Encodes some bytes as a zbase32 string
-pub fn encode(data: &[u8]) -> String {
-       let mut ret = Vec::with_capacity((data.len() + 4) / 5 * 8);
-
-       for chunk in data.chunks(5) {
-               let buf = {
-                       let mut buf = [0u8; 5];
-                       for (i, &b) in chunk.iter().enumerate() {
-                               buf[i] = b;
-                       }
-                       buf
-               };
-
-               ret.push(ALPHABET[((buf[0] & 0xF8) >> 3) as usize]);
-               ret.push(ALPHABET[(((buf[0] & 0x07) << 2) | ((buf[1] & 0xC0) >> 6)) as usize]);
-               ret.push(ALPHABET[((buf[1] & 0x3E) >> 1) as usize]);
-               ret.push(ALPHABET[(((buf[1] & 0x01) << 4) | ((buf[2] & 0xF0) >> 4)) as usize]);
-               ret.push(ALPHABET[(((buf[2] & 0x0F) << 1) | (buf[3] >> 7)) as usize]);
-               ret.push(ALPHABET[((buf[3] & 0x7C) >> 2) as usize]);
-               ret.push(ALPHABET[(((buf[3] & 0x03) << 3) | ((buf[4] & 0xE0) >> 5)) as usize]);
-               ret.push(ALPHABET[(buf[4] & 0x1F) as usize]);
-       }
-
-       ret.truncate((data.len() * 8 + 4) / 5);
-
-       // Check that our capacity calculation doesn't under-shoot in fuzzing
-       #[cfg(fuzzing)]
-       assert_eq!(ret.capacity(), (data.len() + 4) / 5 * 8);
-
-       String::from_utf8(ret).unwrap()
-}
-
-// ASCII 0-Z
-const INV_ALPHABET: [i8; 43] = [
-       -1, 18, -1, 25, 26, 27, 30, 29, 7, 31, -1, -1, -1, -1, -1, -1, -1,  24, 1, 12, 3, 8, 5, 6, 28,
-       21, 9, 10, -1, 11, 2, 16, 13, 14, 4, 22, 17, 19, -1, 20, 15, 0, 23,
-];
-
-/// Decodes a zbase32 string to the original bytes, failing if the string was not encoded by a
-/// proper zbase32 encoder.
-pub fn decode(data: &str) -> Result<Vec<u8>, ()> {
-       if !data.is_ascii() {
-               return Err(());
-       }
-
-       let data = data.as_bytes();
-       let output_length = data.len() * 5 / 8;
-       if data.len() > (output_length * 8 + 4) / 5 {
-               // If the string has more charachters than are required to encode the number of bytes
-               // decodable, treat the string as invalid.
-               return Err(());
-       }
-
-       let mut ret = Vec::with_capacity((data.len() + 7) / 8 * 5);
-
-       for chunk in data.chunks(8) {
-               let buf = {
-                       let mut buf = [0u8; 8];
-                       for (i, &c) in chunk.iter().enumerate() {
-                               match INV_ALPHABET.get(c.to_ascii_uppercase().wrapping_sub(b'0') as usize) {
-                                       Some(&-1) | None => return Err(()),
-                                       Some(&value) => buf[i] = value as u8,
-                               };
-                       }
-                       buf
-               };
-               ret.push((buf[0] << 3) | (buf[1] >> 2));
-               ret.push((buf[1] << 6) | (buf[2] << 1) | (buf[3] >> 4));
-               ret.push((buf[3] << 4) | (buf[4] >> 1));
-               ret.push((buf[4] << 7) | (buf[5] << 2) | (buf[6] >> 3));
-               ret.push((buf[6] << 5) | buf[7]);
-       }
-       for c in ret.drain(output_length..) {
-               if c != 0 {
-                       // If the original string had any bits set at positions outside of the encoded data,
-                       // treat the string as invalid.
-                       return Err(());
-               }
-       }
-
-       // Check that our capacity calculation doesn't under-shoot in fuzzing
-       #[cfg(fuzzing)]
-       assert_eq!(ret.capacity(), (data.len() + 7) / 8 * 5);
-
-       Ok(ret)
-}
-
-#[cfg(test)]
-mod tests {
-       use super::*;
-
-       const TEST_DATA: &[(&str, &[u8])] = &[
-               ("",       &[]),
-               ("yy",   &[0x00]),
-               ("oy",   &[0x80]),
-               ("tqrey",   &[0x8b, 0x88, 0x80]),
-               ("6n9hq",  &[0xf0, 0xbf, 0xc7]),
-               ("4t7ye",  &[0xd4, 0x7a, 0x04]),
-               ("6im5sdy", &[0xf5, 0x57, 0xbb, 0x0c]),
-               ("ybndrfg8ejkmcpqxot1uwisza345h769", &[0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6,
-                                                                                                       0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, 0xd7, 0xc6,
-                                                                                                       0x75, 0xbe, 0x77, 0xdf])
-       ];
-
-       #[test]
-       fn test_encode() {
-               for &(zbase32, data) in TEST_DATA {
-                       assert_eq!(encode(data), zbase32);
-               }
-       }
-
-       #[test]
-       fn test_decode() {
-               for &(zbase32, data) in TEST_DATA {
-                       assert_eq!(decode(zbase32).unwrap(), data);
-               }
-       }
-
-       #[test]
-       fn test_decode_wrong() {
-               const WRONG_DATA: &[&str] = &["00", "l1", "?", "="];
-
-               for &data in WRONG_DATA {
-                       match decode(data) {
-                               Ok(_) => assert!(false, "Data shouldn't be decodable"),
-                               Err(_) => assert!(true),
-                       }
-               }
-       }
-}
diff --git a/pending_changelog/1-hop-bps.txt b/pending_changelog/1-hop-bps.txt
new file mode 100644 (file)
index 0000000..aca2260
--- /dev/null
@@ -0,0 +1,4 @@
+## Backwards Compatibility
+
+* Creating a blinded path over which to receive a payment and then downgrading to a version of LDK
+       prior to 0.0.117 may result in failure to receive the payment (#2413).
diff --git a/pending_changelog/invoice_request_failed_downgrade.txt b/pending_changelog/invoice_request_failed_downgrade.txt
new file mode 100644 (file)
index 0000000..d701cef
--- /dev/null
@@ -0,0 +1,3 @@
+## Backwards Compatibility
+
+* If an `Event::InvoiceRequestFailed` was generated for a BOLT 12 payment (#2371), downgrading will result in the payment silently failing if the event had not been processed yet.
diff --git a/pending_changelog/kvstore.txt b/pending_changelog/kvstore.txt
new file mode 100644 (file)
index 0000000..d96fd69
--- /dev/null
@@ -0,0 +1,3 @@
+## Backwards Compatibility
+
+* Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
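
As a concrete reading of the mapping above, a small hypothetical helper (the name and placement are ours, not part of LDK) that rebuilds the flat key the old `KVStorePersister::persist` call would have received from the new `(namespace, sub_namespace, key)` triple:

```rust
// For example, legacy_key("monitors", "", "<funding_txid>_<index>") yields
// "monitors/<funding_txid>_<index>", matching the keys written before 0.0.117.
fn legacy_key(namespace: &str, sub_namespace: &str, key: &str) -> String {
	match (namespace.is_empty(), sub_namespace.is_empty()) {
		(true, _) => key.to_string(),
		(false, true) => format!("{}/{}", namespace, key),
		(false, false) => format!("{}/{}/{}", namespace, sub_namespace, key),
	}
}
```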
diff --git a/pending_changelog/move_netaddress_to_socketaddress.txt b/pending_changelog/move_netaddress_to_socketaddress.txt
new file mode 100644 (file)
index 0000000..5153ed1
--- /dev/null
@@ -0,0 +1 @@
+* The `NetAddress` type has been renamed to `SocketAddress`. The enum variants `IPv4` and `IPv6` have also been renamed to `TcpIpV4` and `TcpIpV6` (#2358).
diff --git a/pending_changelog/new_channel_id_type_pr_2485.txt b/pending_changelog/new_channel_id_type_pr_2485.txt
new file mode 100644 (file)
index 0000000..4ae3c2c
--- /dev/null
@@ -0,0 +1 @@
+* In several APIs, `channel_id` parameters have been changed from type `[u8; 32]` to the newly introduced `ChannelId` type from the `ln` module (`lightning::ln::ChannelId`) (PR #2485).
diff --git a/pending_changelog/routes_route_params.txt b/pending_changelog/routes_route_params.txt
new file mode 100644 (file)
index 0000000..e88a1c7
--- /dev/null
@@ -0,0 +1,3 @@
+## Backwards Compatibility
+
+- `Route` objects written with LDK versions prior to 0.0.117 won't be retryable after being deserialized with LDK 0.0.117 or above.