Merge pull request #2514 from valentinewallace/2023-08-compute-blindedpayinfo
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Sun, 10 Sep 2023 03:02:22 +0000 (03:02 +0000)
committer GitHub <noreply@github.com>
Sun, 10 Sep 2023 03:02:22 +0000 (03:02 +0000)
Aggregate `BlindedPayInfo` for blinded routes
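
The aggregation this PR refers to collapses the per-hop forwarding fees and CLTV deltas of a blinded path into the single `BlindedPayInfo` (base fee, proportional fee, CLTV delta, HTLC min/max, features) advertised to payers. As a rough illustration of the arithmetic involved — a minimal sketch with hypothetical types, not LDK's actual implementation — the totals for one fixed recipient amount can be found by walking the hops from the destination back to the introduction point:

// Minimal sketch (not LDK code): hypothetical per-hop forwarding parameters
// for the intermediate hops of a blinded path.
struct HopFees {
    base_msat: u64,
    proportional_millionths: u64,
    cltv_expiry_delta: u16,
}

// Walk the hops from the destination back toward the introduction node,
// adding each hop's fee on the amount it must forward. The total fee and the
// summed CLTV delta are the kind of aggregates a single BlindedPayInfo
// communicates to the payer.
fn aggregate(hops: &[HopFees], recipient_amt_msat: u64) -> (u64, u32) {
    let mut amt = recipient_amt_msat;
    let mut total_cltv_delta = 0u32;
    for hop in hops.iter().rev() {
        // Round the proportional fee up so no hop is ever underpaid.
        let fee = hop.base_msat + (amt * hop.proportional_millionths + 999_999) / 1_000_000;
        amt += fee;
        total_cltv_delta += u32::from(hop.cltv_expiry_delta);
    }
    (amt - recipient_amt_msat, total_cltv_delta)
}

fn main() {
    let hops = [
        HopFees { base_msat: 1_000, proportional_millionths: 100, cltv_expiry_delta: 40 },
        HopFees { base_msat: 500, proportional_millionths: 200, cltv_expiry_delta: 144 },
    ];
    let (fee_msat, cltv_delta) = aggregate(&hops, 100_000_000);
    println!("aggregate fee: {} msat, aggregate cltv delta: {}", fee_msat, cltv_delta);
}

Expressing the result as an aggregate base/proportional fee pair, rather than a fee for one fixed amount, is what lets a single `BlindedPayInfo` cover any amount the payer chooses.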

68 files changed:
bench/benches/bench.rs
ci/ci-tests.sh
fuzz/src/base32.rs [new file with mode: 0644]
fuzz/src/bin/base32_target.rs [new file with mode: 0644]
fuzz/src/bin/fromstr_to_netaddress_target.rs [new file with mode: 0644]
fuzz/src/bin/gen_target.sh
fuzz/src/chanmon_consistency.rs
fuzz/src/chanmon_deser.rs
fuzz/src/fromstr_to_netaddress.rs [new file with mode: 0644]
fuzz/src/full_stack.rs
fuzz/src/lib.rs
fuzz/src/onion_message.rs
fuzz/src/router.rs
fuzz/src/utils/test_persister.rs
fuzz/src/zbase32.rs
fuzz/targets.h
lightning-background-processor/src/lib.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/fs_store.rs [new file with mode: 0644]
lightning-persister/src/lib.rs
lightning-persister/src/test_utils.rs [new file with mode: 0644]
lightning-persister/src/util.rs [deleted file]
lightning-persister/src/utils.rs [new file with mode: 0644]
lightning/src/chain/chainmonitor.rs
lightning/src/events/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channel_id.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/inbound_payment.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/offers/invoice.rs
lightning/src/offers/invoice_request.rs
lightning/src/offers/mod.rs
lightning/src/offers/offer.rs
lightning/src/offers/refund.rs
lightning/src/offers/signer.rs
lightning/src/offers/test_utils.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/util/base32.rs [new file with mode: 0644]
lightning/src/util/chacha20.rs
lightning/src/util/crypto.rs
lightning/src/util/enforcing_trait_impls.rs [deleted file]
lightning/src/util/message_signing.rs
lightning/src/util/mod.rs
lightning/src/util/persist.rs
lightning/src/util/test_channel_signer.rs [new file with mode: 0644]
lightning/src/util/test_utils.rs
lightning/src/util/zbase32.rs [deleted file]
pending_changelog/invoice_request_failed_downgrade.txt [new file with mode: 0644]
pending_changelog/kvstore.txt [new file with mode: 0644]
pending_changelog/move_netaddress_to_socketaddress.txt [new file with mode: 0644]
pending_changelog/new_channel_id_type_pr_2485.txt [new file with mode: 0644]
pending_changelog/routes_route_params.txt [new file with mode: 0644]

diff --git a/bench/benches/bench.rs b/bench/benches/bench.rs
index 54799f44c951422bf8f81bafd88e62dee1e4355c..bc4bd010822ddc24d84c3d761f7da63d8e7fad99 100644 (file)
@@ -15,7 +15,7 @@ criterion_group!(benches,
        lightning::routing::router::benches::generate_large_mpp_routes_with_probabilistic_scorer,
        lightning::sign::benches::bench_get_secure_random_bytes,
        lightning::ln::channelmanager::bench::bench_sends,
-       lightning_persister::bench::bench_sends,
+       lightning_persister::fs_store::bench::bench_sends,
        lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
        lightning::routing::gossip::benches::read_network_graph,
        lightning::routing::gossip::benches::write_network_graph);
diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
index ef9ecf7b86d8653d5bdf1b7ab758032425bd33d6..8c675a654be9304d3066b2af4e776dc6492ab0b3 100755 (executable)
@@ -32,6 +32,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 # The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
 [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
 
+# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
+[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
+
 [ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
 
 export RUST_BACKTRACE=1
diff --git a/fuzz/src/base32.rs b/fuzz/src/base32.rs
new file mode 100644 (file)
index 0000000..8171f19
--- /dev/null
@@ -0,0 +1,52 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use lightning::util::base32;
+
+use crate::utils::test_logger;
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+       if let Ok(s) = std::str::from_utf8(data) {
+               let first_decoding = base32::Alphabet::RFC4648 { padding: true }.decode(s);
+               if let Ok(first_decoding) = first_decoding {
+                       let encoding_response = base32::Alphabet::RFC4648 { padding: true }.encode(&first_decoding);
+                       assert_eq!(encoding_response, s.to_ascii_uppercase());
+                       let second_decoding = base32::Alphabet::RFC4648 { padding: true }.decode(&encoding_response).unwrap();
+                       assert_eq!(first_decoding, second_decoding);
+               }
+       }
+
+       if let Ok(s) = std::str::from_utf8(data) {
+               let first_decoding = base32::Alphabet::RFC4648 { padding: false }.decode(s);
+               if let Ok(first_decoding) = first_decoding {
+                       let encoding_response = base32::Alphabet::RFC4648 { padding: false }.encode(&first_decoding);
+                       assert_eq!(encoding_response, s.to_ascii_uppercase());
+                       let second_decoding = base32::Alphabet::RFC4648 { padding: false }.decode(&encoding_response).unwrap();
+                       assert_eq!(first_decoding, second_decoding);
+               }
+       }
+       
+       let encode_response = base32::Alphabet::RFC4648 { padding: false }.encode(&data);
+       let decode_response = base32::Alphabet::RFC4648 { padding: false }.decode(&encode_response).unwrap();
+       assert_eq!(data, decode_response);
+
+       let encode_response = base32::Alphabet::RFC4648 { padding: true }.encode(&data);
+       let decode_response = base32::Alphabet::RFC4648 { padding: true }.decode(&encode_response).unwrap();
+       assert_eq!(data, decode_response);
+}
+
+pub fn base32_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       do_test(data);
+}
+
+#[no_mangle]
+pub extern "C" fn base32_run(data: *const u8, datalen: usize) {
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
+}
diff --git a/fuzz/src/bin/base32_target.rs b/fuzz/src/bin/base32_target.rs
new file mode 100644 (file)
index 0000000..a7951c7
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::base32::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               base32_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       base32_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       base32_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       base32_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               base32_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/base32") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               base32_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
diff --git a/fuzz/src/bin/fromstr_to_netaddress_target.rs b/fuzz/src/bin/fromstr_to_netaddress_target.rs
new file mode 100644 (file)
index 0000000..29c984e
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::fromstr_to_netaddress::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               fromstr_to_netaddress_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       fromstr_to_netaddress_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       fromstr_to_netaddress_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       fromstr_to_netaddress_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               fromstr_to_netaddress_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/fromstr_to_netaddress") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               fromstr_to_netaddress_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
diff --git a/fuzz/src/bin/gen_target.sh b/fuzz/src/bin/gen_target.sh
index fe17e4bab8ff356c599587446a65ec6c1033528a..2fa7debdf468670d5cf1da0e7ba9fa215df94c31 100755 (executable)
@@ -21,6 +21,8 @@ GEN_TEST router
 GEN_TEST zbase32
 GEN_TEST indexedmap
 GEN_TEST onion_hop_data
+GEN_TEST base32
+GEN_TEST fromstr_to_netaddress
 
 GEN_TEST msg_accept_channel msg_targets::
 GEN_TEST msg_announcement_signatures msg_targets::
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 296b3a03e9c1b85b8df7567efb93a6e3b3125937..4c79f0bee27f41e527bd40efdd51051e7dac15c9 100644 (file)
@@ -46,7 +46,7 @@ use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
@@ -118,7 +118,7 @@ struct TestChainMonitor {
        pub logger: Arc<dyn Logger>,
        pub keys: Arc<KeyProvider>,
        pub persister: Arc<TestPersister>,
-       pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       pub chain_monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
        // logic will automatically force-close our channels for us (as we don't have an up-to-date
        // monitor implying we are not able to punish misbehaving counterparties). Because this test
@@ -139,8 +139,8 @@ impl TestChainMonitor {
                }
        }
 }
-impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
+impl chain::Watch<TestChannelSigner> for TestChainMonitor {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                let mut ser = VecWriter(Vec::new());
                monitor.write(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
@@ -156,7 +156,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                        hash_map::Entry::Occupied(entry) => entry,
                        hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
                };
-               let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
+               let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
                        read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
                deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
                let mut ser = VecWriter(Vec::new());
@@ -234,7 +234,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
@@ -257,7 +257,7 @@ impl SignerProvider for KeyProvider {
                        channel_keys_id,
                );
                let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
+               TestChannelSigner::new_with_revoked(keys, revoked_commitment, false)
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
@@ -266,7 +266,7 @@ impl SignerProvider for KeyProvider {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner {
+               Ok(TestChannelSigner {
                        inner,
                        state,
                        disable_revocation_policy_check: false,
@@ -376,7 +376,7 @@ fn send_payment(source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, p
                        fee_msat: amt,
                        cltv_expiry_delta: 200,
                }], blinded_tail: None }],
-               payment_params: None,
+               route_params: None,
        }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
                check_payment_err(err, amt > max_value_sendable || amt < min_value_sendable);
                false
@@ -409,7 +409,7 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
                        channel_features: middle.channel_features(),
                        fee_msat: first_hop_fee,
                        cltv_expiry_delta: 100,
-               },RouteHop {
+               }, RouteHop {
                        pubkey: dest.get_our_node_id(),
                        node_features: dest.node_features(),
                        short_channel_id: dest_chan_id,
@@ -417,7 +417,7 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des
                        fee_msat: amt,
                        cltv_expiry_delta: 200,
                }], blinded_tail: None }],
-               payment_params: None,
+               route_params: None,
        }, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_id)) {
                let sent_amt = amt + first_hop_fee;
                check_payment_err(err, sent_amt < min_value_sendable || sent_amt > max_value_sendable);
@@ -477,7 +477,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        let mut monitors = HashMap::new();
                        let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
                        for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
-                               monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
+                               monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
                                chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
                        }
                        let mut monitor_refs = HashMap::new();
diff --git a/fuzz/src/chanmon_deser.rs b/fuzz/src/chanmon_deser.rs
index 61744ace7c733419149455dc21ab70fe7e496b60..8d425357c96844b2b9766de612879750f4dceddc 100644 (file)
@@ -4,7 +4,7 @@
 use bitcoin::hash_types::BlockHash;
 
 use lightning::chain::channelmonitor;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::ser::{ReadableArgs, Writer, Writeable};
 use lightning::util::test_utils::OnlyReadsKeysInterface;
 
@@ -22,10 +22,10 @@ impl Writer for VecWriter {
 
 #[inline]
 pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
-       if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
+       if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
                let mut w = VecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
-               let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
+               let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
                assert!(latest_block_hash == deserialized_copy.0);
                assert!(monitor == deserialized_copy.1);
        }
diff --git a/fuzz/src/fromstr_to_netaddress.rs b/fuzz/src/fromstr_to_netaddress.rs
new file mode 100644 (file)
index 0000000..dba2d44
--- /dev/null
@@ -0,0 +1,31 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use lightning::ln::msgs::SocketAddress;
+use core::str::FromStr;
+
+use crate::utils::test_logger;
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+       if let Ok(s) = std::str::from_utf8(data) {
+               let _ = SocketAddress::from_str(s);
+       }
+
+}
+
+pub fn fromstr_to_netaddress_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       do_test(data);
+}
+
+#[no_mangle]
+pub extern "C" fn fromstr_to_netaddress_run(data: *const u8, datalen: usize) {
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
+}
+
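
For context, a hypothetical usage sketch (not part of this commit) of the `SocketAddress::from_str` path the new target fuzzes; the property being checked is that parsing arbitrary input returns `Ok` or `Err` without panicking. The literal addresses below are illustrative only.

// Hypothetical usage sketch, assuming a std build.
use core::str::FromStr;
use lightning::ln::msgs::SocketAddress;

fn main() {
    // A well-formed IPv4 "host:port" string is expected to parse.
    assert!(SocketAddress::from_str("198.51.100.7:9735").is_ok());
    // Input without a usable port should yield an error rather than a panic,
    // which is what the fuzz target above implicitly asserts.
    assert!(SocketAddress::from_str("not an address").is_err());
}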
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 67ff2a5874c2832c9201f54735272ad612bb7c43..d7167146cac18d90c58d31f617f5af677875bbbf 100644 (file)
@@ -47,7 +47,7 @@ use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
 use lightning::util::config::{UserConfig, MaxDustHTLCExposure};
 use lightning::util::errors::APIError;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::logger::Logger;
 use lightning::util::ser::{ReadableArgs, Writeable};
 
@@ -180,13 +180,13 @@ impl<'a> std::hash::Hash for Peer<'a> {
 }
 
 type ChannelMan<'a> = ChannelManager<
-       Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<FuzzEstimator>, &'a FuzzRouter, Arc<dyn Logger>>;
 type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan<'a>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn UtxoLookup>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler, Arc<KeyProvider>>;
 
 struct MoneyLossDetector<'a> {
        manager: Arc<ChannelMan<'a>>,
-       monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        handler: PeerMan<'a>,
 
        peers: &'a RefCell<[bool; 256]>,
@@ -200,7 +200,7 @@ struct MoneyLossDetector<'a> {
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
                   manager: Arc<ChannelMan<'a>>,
-                  monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+                  monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
                   handler: PeerMan<'a>) -> Self {
                MoneyLossDetector {
                        manager,
@@ -339,7 +339,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
@@ -351,7 +351,7 @@ impl SignerProvider for KeyProvider {
                let secp_ctx = Secp256k1::signing_only();
                let ctr = channel_keys_id[0];
                let (inbound, state) = self.signer_state.borrow().get(&ctr).unwrap().clone();
-               EnforcingSigner::new_with_revoked(if inbound {
+               TestChannelSigner::new_with_revoked(if inbound {
                        InMemorySigner::new(
                                &secp_ctx,
                                SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ctr]).unwrap(),
@@ -380,11 +380,11 @@ impl SignerProvider for KeyProvider {
                }, state, false)
        }
 
-       fn read_chan_signer(&self, mut data: &[u8]) -> Result<EnforcingSigner, DecodeError> {
+       fn read_chan_signer(&self, mut data: &[u8]) -> Result<TestChannelSigner, DecodeError> {
                let inner: InMemorySigner = ReadableArgs::read(&mut data, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        false
@@ -527,10 +527,8 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                        4 => {
                                let final_value_msat = slice_to_be24(get_slice!(3)) as u64;
                                let payment_params = PaymentParameters::from_node_id(get_pubkey!(), 42);
-                               let params = RouteParameters {
-                                       payment_params,
-                                       final_value_msat,
-                               };
+                               let params = RouteParameters::from_payment_params_and_value(
+                                       payment_params, final_value_msat);
                                let mut payment_hash = PaymentHash([0; 32]);
                                payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
                                let mut sha = Sha256::engine();
@@ -548,10 +546,8 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                        15 => {
                                let final_value_msat = slice_to_be24(get_slice!(3)) as u64;
                                let payment_params = PaymentParameters::from_node_id(get_pubkey!(), 42);
-                               let params = RouteParameters {
-                                       payment_params,
-                                       final_value_msat,
-                               };
+                               let params = RouteParameters::from_payment_params_and_value(
+                                       payment_params, final_value_msat);
                                let mut payment_hash = PaymentHash([0; 32]);
                                payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
                                let mut sha = Sha256::engine();
diff --git a/fuzz/src/lib.rs b/fuzz/src/lib.rs
index 6cdeb8ab5d205f66be9083f035ea767d878cfad8..5b5cd69cf9681ac2d1fa5c8b75a52eebbd0447a5 100644 (file)
@@ -29,5 +29,7 @@ pub mod refund_deser;
 pub mod router;
 pub mod zbase32;
 pub mod onion_hop_data;
+pub mod base32;
+pub mod fromstr_to_netaddress;
 
 pub mod msg_targets;
diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
index 0ffc090ea197514c8ab5eacf3e2a13030b11f759..d2e35cd45cdbabc7745abd4f49c8cdf94e793362 100644 (file)
@@ -11,7 +11,7 @@ use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
 use lightning::ln::script::ShutdownScript;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::logger::Logger;
 use lightning::util::ser::{Readable, Writeable, Writer};
 use lightning::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessagePath, OnionMessenger};
@@ -174,7 +174,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!() }
 
@@ -182,7 +182,7 @@ impl SignerProvider for KeyProvider {
                unreachable!()
        }
 
-       fn read_chan_signer(&self, _data: &[u8]) -> Result<EnforcingSigner, DecodeError> { unreachable!() }
+       fn read_chan_signer(&self, _data: &[u8]) -> Result<TestChannelSigner, DecodeError> { unreachable!() }
 
        fn get_destination_script(&self) -> Result<Script, ()> { unreachable!() }
 
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index 830f6f4e201cb3d91155dca468492d586133b031..b7d45bf729f4d6c488c5916de536a5cf6f02f6dc 100644 (file)
@@ -326,11 +326,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                let mut last_hops = Vec::new();
                                last_hops!(last_hops);
                                find_routes!(first_hops, node_pks.iter(), |final_amt, final_delta, target: &PublicKey| {
-                                       RouteParameters {
-                                               payment_params: PaymentParameters::from_node_id(*target, final_delta)
+                                       RouteParameters::from_payment_params_and_value(
+                                               PaymentParameters::from_node_id(*target, final_delta)
                                                        .with_route_hints(last_hops.clone()).unwrap(),
-                                               final_value_msat: final_amt,
-                                       }
+                                               final_amt)
                                });
                        },
                        x => {
@@ -366,11 +365,9 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                let mut features = Bolt12InvoiceFeatures::empty();
                                features.set_basic_mpp_optional();
                                find_routes!(first_hops, vec![dummy_pk].iter(), |final_amt, _, _| {
-                                       RouteParameters {
-                                               payment_params: PaymentParameters::blinded(last_hops.clone())
-                                                       .with_bolt12_features(features.clone()).unwrap(),
-                                               final_value_msat: final_amt,
-                                       }
+                                       RouteParameters::from_payment_params_and_value(PaymentParameters::blinded(last_hops.clone())
+                                               .with_bolt12_features(features.clone()).unwrap(),
+                                       final_amt)
                                });
                        }
                }
diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs
index e3635297adb037500adafebd98708025c99fa762..89de25aa5e6a192786a36b8d97605dbabf86b25d 100644 (file)
@@ -2,19 +2,19 @@ use lightning::chain;
 use lightning::chain::{chainmonitor, channelmonitor};
 use lightning::chain::chainmonitor::MonitorUpdateId;
 use lightning::chain::transaction::OutPoint;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 
 use std::sync::Mutex;
 
 pub struct TestPersister {
        pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
-impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+impl chainmonitor::Persist<TestChannelSigner> for TestPersister {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 }
diff --git a/fuzz/src/zbase32.rs b/fuzz/src/zbase32.rs
index 5ea453bfb86934f24a93e7e2580b592f4460c61f..04979204e9898ddab007b59d3efceefa3d5086be 100644 (file)
@@ -7,18 +7,19 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use lightning::util::zbase32;
+use lightning::util::base32;
 
 use crate::utils::test_logger;
 
 #[inline]
 pub fn do_test(data: &[u8]) {
-       let res = zbase32::encode(data);
-       assert_eq!(&zbase32::decode(&res).unwrap()[..], data);
+       let res = base32::Alphabet::ZBase32.encode(data);
+       assert_eq!(&base32::Alphabet::ZBase32.decode(&res).unwrap()[..], data);
 
        if let Ok(s) = std::str::from_utf8(data) {
-               if let Ok(decoded) = zbase32::decode(s) {
-                       assert_eq!(&zbase32::encode(&decoded), &s.to_ascii_lowercase());
+               let res = base32::Alphabet::ZBase32.decode(s);
+               if let Ok(decoded) = res {
+                       assert_eq!(&base32::Alphabet::ZBase32.encode(&decoded), &s.to_ascii_lowercase());
                }
        }
 }
diff --git a/fuzz/targets.h b/fuzz/targets.h
index 9b5a6d4553645215a32f6679c90d1b482d3d5c16..cad0ac4d822d7cd9e46e3acd39977e58e5e3ffda 100644 (file)
@@ -14,6 +14,8 @@ void router_run(const unsigned char* data, size_t data_len);
 void zbase32_run(const unsigned char* data, size_t data_len);
 void indexedmap_run(const unsigned char* data, size_t data_len);
 void onion_hop_data_run(const unsigned char* data, size_t data_len);
+void base32_run(const unsigned char* data, size_t data_len);
+void fromstr_to_netaddress_run(const unsigned char* data, size_t data_len);
 void msg_accept_channel_run(const unsigned char* data, size_t data_len);
 void msg_announcement_signatures_run(const unsigned char* data, size_t data_len);
 void msg_channel_reestablish_run(const unsigned char* data, size_t data_len);
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 8648920ec2c197be752ab4e8cc8959f08ee14d87..353ed6738d686698a9dd88079587316019f738d2 100644 (file)
@@ -500,9 +500,16 @@ use core::task;
 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
 /// could setup `process_events_async` like this:
 /// ```
-/// # struct MyPersister {}
-/// # impl lightning::util::persist::KVStorePersister for MyPersister {
-/// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
+/// # use lightning::io;
+/// # use std::sync::{Arc, Mutex};
+/// # use std::sync::atomic::{AtomicBool, Ordering};
+/// # use lightning_background_processor::{process_events_async, GossipSync};
+/// # struct MyStore {}
+/// # impl lightning::util::persist::KVStore for MyStore {
+/// #     fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
+/// #     fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
+/// #     fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
+/// #     fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
 /// # }
 /// # struct MyEventHandler {}
 /// # impl MyEventHandler {
@@ -514,23 +521,20 @@ use core::task;
 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
 /// #     fn disconnect_socket(&mut self) {}
 /// # }
-/// # use std::sync::{Arc, Mutex};
-/// # use std::sync::atomic::{AtomicBool, Ordering};
-/// # use lightning_background_processor::{process_events_async, GossipSync};
 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
-/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
+/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
 ///
-/// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
+/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
 ///    let background_persister = Arc::clone(&my_persister);
 ///    let background_event_handler = Arc::clone(&my_event_handler);
 ///    let background_chain_mon = Arc::clone(&my_chain_monitor);
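
The doc-example changes above reflect the switch from `KVStorePersister` to the namespaced `KVStore` trait. As a rough illustration of what a non-stubbed implementation of that trait shape looks like, here is a minimal in-memory sketch — assuming a std build where `lightning::io` mirrors `std::io`; this is not code from the commit:

use std::collections::HashMap;
use std::sync::Mutex;

use lightning::io;
use lightning::util::persist::KVStore;

// Hypothetical test-only store keyed by (namespace, sub_namespace, key).
#[derive(Default)]
struct MemoryStore {
    entries: Mutex<HashMap<(String, String, String), Vec<u8>>>,
}

impl KVStore for MemoryStore {
    fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
        self.entries.lock().unwrap()
            .get(&(namespace.to_string(), sub_namespace.to_string(), key.to_string()))
            .cloned()
            // Report missing keys as NotFound rather than as empty data.
            .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
    }

    fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
        self.entries.lock().unwrap()
            .insert((namespace.to_string(), sub_namespace.to_string(), key.to_string()), buf.to_vec());
        Ok(())
    }

    fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
        self.entries.lock().unwrap()
            .remove(&(namespace.to_string(), sub_namespace.to_string(), key.to_string()));
        Ok(())
    }

    fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
        Ok(self.entries.lock().unwrap().keys()
            .filter(|(ns, sns, _)| ns.as_str() == namespace && sns.as_str() == sub_namespace)
            .map(|(_, _, k)| k.clone())
            .collect())
    }
}

The test `Persister` further down in this diff follows the same pattern, wrapping a `FilesystemStore` and intercepting `write` calls whose namespace matches the `CHANNEL_MANAGER_PERSISTENCE_NAMESPACE`, `NETWORK_GRAPH_PERSISTENCE_NAMESPACE`, or `SCORER_PERSISTENCE_NAMESPACE` constants.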
@@ -866,8 +870,8 @@ mod tests {
        use lightning::util::config::UserConfig;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
-       use lightning::util::persist::KVStorePersister;
-       use lightning_persister::FilesystemPersister;
+       use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_SUB_NAMESPACE, SCORER_PERSISTENCE_KEY};
+       use lightning_persister::fs_store::FilesystemStore;
        use std::collections::VecDeque;
        use std::{fs, env};
        use std::path::PathBuf;
@@ -906,7 +910,7 @@ mod tests {
                        >,
                        Arc<test_utils::TestLogger>>;
 
-       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
+       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
 
        type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
        type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
@@ -917,7 +921,7 @@ mod tests {
                rapid_gossip_sync: RGS,
                peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
                chain_monitor: Arc<ChainMonitor>,
-               persister: Arc<FilesystemPersister>,
+               kv_store: Arc<FilesystemStore>,
                tx_broadcaster: Arc<test_utils::TestBroadcaster>,
                network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
                logger: Arc<test_utils::TestLogger>,
@@ -941,9 +945,9 @@ mod tests {
 
        impl Drop for Node {
                fn drop(&mut self) {
-                       let data_dir = self.persister.get_data_dir();
+                       let data_dir = self.kv_store.get_data_dir();
                        match fs::remove_dir_all(data_dir.clone()) {
-                               Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
+                               Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
                                _ => {}
                        }
                }
@@ -954,13 +958,13 @@ mod tests {
                graph_persistence_notifier: Option<SyncSender<()>>,
                manager_error: Option<(std::io::ErrorKind, &'static str)>,
                scorer_error: Option<(std::io::ErrorKind, &'static str)>,
-               filesystem_persister: FilesystemPersister,
+               kv_store: FilesystemStore,
        }
 
        impl Persister {
-               fn new(data_dir: String) -> Self {
-                       let filesystem_persister = FilesystemPersister::new(data_dir);
-                       Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
+               fn new(data_dir: PathBuf) -> Self {
+                       let kv_store = FilesystemStore::new(data_dir);
+                       Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
                }
 
                fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
@@ -980,15 +984,25 @@ mod tests {
                }
        }
 
-       impl KVStorePersister for Persister {
-               fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
-                       if key == "manager" {
+       impl KVStore for Persister {
+               fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
+                       self.kv_store.read(namespace, sub_namespace, key)
+               }
+
+               fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
+                       if namespace == CHANNEL_MANAGER_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE &&
+                               key == CHANNEL_MANAGER_PERSISTENCE_KEY
+                       {
                                if let Some((error, message)) = self.manager_error {
                                        return Err(std::io::Error::new(error, message))
                                }
                        }
 
-                       if key == "network_graph" {
+                       if namespace == NETWORK_GRAPH_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE &&
+                               key == NETWORK_GRAPH_PERSISTENCE_KEY
+                       {
                                if let Some(sender) = &self.graph_persistence_notifier {
                                        match sender.send(()) {
                                                Ok(()) => {},
@@ -1001,13 +1015,24 @@ mod tests {
                                }
                        }
 
-                       if key == "scorer" {
+                       if namespace == SCORER_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == SCORER_PERSISTENCE_SUB_NAMESPACE &&
+                               key == SCORER_PERSISTENCE_KEY
+                       {
                                if let Some((error, message)) = self.scorer_error {
                                        return Err(std::io::Error::new(error, message))
                                }
                        }
 
-                       self.filesystem_persister.persist(key, object)
+                       self.kv_store.write(namespace, sub_namespace, key, buf)
+               }
+
+               fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
+                       self.kv_store.remove(namespace, sub_namespace, key, lazy)
+               }
+
+               fn list(&self, namespace: &str, sub_namespace: &str) -> lightning::io::Result<Vec<String>> {
+                       self.kv_store.list(namespace, sub_namespace)
                }
        }
 
@@ -1157,10 +1182,10 @@ mod tests {
                        let seed = [i as u8; 32];
                        let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
                        let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
-                       let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
+                       let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
                        let now = Duration::from_secs(genesis_block.header.time as u64);
                        let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
-                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
+                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
                        let best_block = BestBlock::from_network(network);
                        let params = ChainParameters { network, best_block };
                        let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
@@ -1172,7 +1197,7 @@ mod tests {
                                onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
                        };
                        let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
-                       let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
+                       let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
                        nodes.push(node);
                }
 
@@ -1267,7 +1292,7 @@ mod tests {
                let tx = open_channel!(nodes[0], nodes[1], 100000);
 
                // Initiate the background processors to watch each node.
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1332,7 +1357,7 @@ mod tests {
                // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
                // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
                let (_, nodes) = create_nodes(1, "test_timer_tick_called");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1359,7 +1384,7 @@ mod tests {
                let (_, nodes) = create_nodes(2, "test_persist_error");
                open_channel!(nodes[0], nodes[1], 100000);
 
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1379,7 +1404,7 @@ mod tests {
                let (_, nodes) = create_nodes(2, "test_persist_error_sync");
                open_channel!(nodes[0], nodes[1], 100000);
 
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
 
                let bp_future = super::process_events_async(
@@ -1405,7 +1430,7 @@ mod tests {
        fn test_network_graph_persist_error() {
                // Test that if we encounter an error during network graph persistence, an error gets returned.
                let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1423,7 +1448,7 @@ mod tests {
        fn test_scorer_persist_error() {
                // Test that if we encounter an error during scorer persistence, an error gets returned.
                let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1441,7 +1466,7 @@ mod tests {
        fn test_background_event_handling() {
                let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
                let channel_value = 100000;
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir.clone()));
 
                // Set up a background event handler for FundingGenerationReady events.
@@ -1514,7 +1539,7 @@ mod tests {
        #[test]
        fn test_scorer_persistence() {
                let (_, nodes) = create_nodes(2, "test_scorer_persistence");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1586,7 +1611,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 
                let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
                let event_handler = |_: _| {};
@@ -1605,7 +1630,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 
                let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
                let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
@@ -1745,7 +1770,7 @@ mod tests {
                };
 
                let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
@@ -1778,7 +1803,7 @@ mod tests {
                };
 
                let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
 
                let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
index 744c2654cd84705965999d09f602e4c17f2de3dd..a512b2de05dd74eb7125bf543d44fd0dbe5ec315 100644 (file)
@@ -869,10 +869,8 @@ mod test {
                                invoice.min_final_cltv_expiry_delta() as u32)
                        .with_bolt11_features(invoice.features().unwrap().clone()).unwrap()
                        .with_route_hints(invoice.route_hints()).unwrap();
-               let route_params = RouteParameters {
-                       payment_params,
-                       final_value_msat: invoice.amount_milli_satoshis().unwrap(),
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, invoice.amount_milli_satoshis().unwrap());
                let payment_event = {
                        let mut payment_hash = PaymentHash([0; 32]);
                        payment_hash.0.copy_from_slice(&invoice.payment_hash().as_ref()[0..32]);
@@ -1326,10 +1324,8 @@ mod test {
                                invoice.min_final_cltv_expiry_delta() as u32)
                        .with_bolt11_features(invoice.features().unwrap().clone()).unwrap()
                        .with_route_hints(invoice.route_hints()).unwrap();
-               let params = RouteParameters {
-                       payment_params,
-                       final_value_msat: invoice.amount_milli_satoshis().unwrap(),
-               };
+               let params = RouteParameters::from_payment_params_and_value(
+                       payment_params, invoice.amount_milli_satoshis().unwrap());
                let (payment_event, fwd_idx) = {
                        let mut payment_hash = PaymentHash([0; 32]);
                        payment_hash.0.copy_from_slice(&invoice.payment_hash().as_ref()[0..32]);
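For illustration, a minimal hedged sketch of the constructor the hunks above switch to, in place of the removed struct-literal construction. The helper name, payee key, and CLTV delta below are placeholders, not values taken from these tests:

    use bitcoin::secp256k1::PublicKey;
    use lightning::routing::router::{PaymentParameters, RouteParameters};

    // Sketch only: build RouteParameters via the new helper rather than filling
    // the struct fields directly, as the updated tests above now do.
    fn build_route_params(payee: PublicKey, amount_msat: u64) -> RouteParameters {
        // 42 is an arbitrary final CLTV expiry delta chosen for illustration.
        let payment_params = PaymentParameters::from_node_id(payee, 42);
        RouteParameters::from_payment_params_and_value(payment_params, amount_msat)
    }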
index c2befdae51cbf5d44d22014cb364c4fde39a6661..d2ac6e5474543db1392926dc8dfebf07715b0d86 100644 (file)
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] }
+tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
 lightning = { version = "0.0.116", path = "../lightning", features = ["_test_utils"] }
index d6d8004164bf9ab54db94ee64b55ef63f5f57e74..06aed3194f9bc18e27d146ea9cbedc23604edfe8 100644 (file)
 
 use bitcoin::secp256k1::PublicKey;
 
-use tokio::net::TcpStream;
+use tokio::net::{tcp, TcpStream};
 use tokio::{io, time};
 use tokio::sync::mpsc;
-use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use tokio::io::AsyncWrite;
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::APeerManager;
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
 
 use std::ops::Deref;
 use std::task::{self, Poll};
@@ -59,7 +59,7 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 // define a trivial two- and three- select macro with the specific types we need and just use that.
 
 pub(crate) enum SelectorOutput {
-       A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
+       A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
 }
 
 pub(crate) struct TwoSelector<
@@ -87,7 +87,7 @@ impl<
 }
 
 pub(crate) struct ThreeSelector<
-       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
 > {
        pub a: A,
        pub b: B,
@@ -95,7 +95,7 @@ pub(crate) struct ThreeSelector<
 }
 
 impl<
-       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
 > Future for ThreeSelector<A, B, C> {
        type Output = SelectorOutput;
        fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
@@ -119,7 +119,7 @@ impl<
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
 struct Connection {
-       writer: Option<io::WriteHalf<TcpStream>>,
+       writer: Option<Arc<TcpStream>>,
        // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
        // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
        // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
@@ -156,7 +156,7 @@ impl Connection {
        async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
                peer_manager: PM,
                us: Arc<Mutex<Self>>,
-               mut reader: io::ReadHalf<TcpStream>,
+               reader: Arc<TcpStream>,
                mut read_wake_receiver: mpsc::Receiver<()>,
                mut write_avail_receiver: mpsc::Receiver<()>,
        ) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
@@ -200,7 +200,7 @@ impl Connection {
                                ThreeSelector {
                                        a: Box::pin(write_avail_receiver.recv()),
                                        b: Box::pin(read_wake_receiver.recv()),
-                                       c: Box::pin(reader.read(&mut buf)),
+                                       c: Box::pin(reader.readable()),
                                }.await
                        };
                        match select_result {
@@ -211,8 +211,9 @@ impl Connection {
                                        }
                                },
                                SelectorOutput::B(_) => {},
-                               SelectorOutput::C(read) => {
-                                       match read {
+                               SelectorOutput::C(res) => {
+                                       if res.is_err() { break Disconnect::PeerDisconnected; }
+                                       match reader.try_read(&mut buf) {
                                                Ok(0) => break Disconnect::PeerDisconnected,
                                                Ok(len) => {
                                                        let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
@@ -226,7 +227,11 @@ impl Connection {
                                                                Err(_) => break Disconnect::CloseConnection,
                                                        }
                                                },
-                                               Err(_) => break Disconnect::PeerDisconnected,
+                                               Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+                                                       // readable() is allowed to spuriously wake, so we have to handle
+                                                       // WouldBlock here.
+                                               },
+                                               Err(_) => break Disconnect::PeerDisconnected,
                                        }
                                },
                        }
@@ -239,18 +244,14 @@ impl Connection {
                        // here.
                        let _ = tokio::task::yield_now().await;
                };
-               let writer_option = us.lock().unwrap().writer.take();
-               if let Some(mut writer) = writer_option {
-                       // If the socket is already closed, shutdown() will fail, so just ignore it.
-                       let _ = writer.shutdown().await;
-               }
+               us.lock().unwrap().writer.take();
                if let Disconnect::PeerDisconnected = disconnect_type {
                        peer_manager.as_ref().socket_disconnected(&our_descriptor);
                        peer_manager.as_ref().process_events();
                }
        }
 
-       fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+       fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
                // We only ever need a channel of depth 1 here: if we returned a non-full write to the
                // PeerManager, we will eventually get notified that there is room in the socket to write
                // new bytes, which will generate an event. That event will be popped off the queue before
@@ -262,24 +263,24 @@ impl Connection {
                // false.
                let (read_waker, read_receiver) = mpsc::channel(1);
                stream.set_nonblocking(true).unwrap();
-               let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
+               let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap());
 
-               (reader, write_receiver, read_receiver,
+               (Arc::clone(&tokio_stream), write_receiver, read_receiver,
                Arc::new(Mutex::new(Self {
-                       writer: Some(writer), write_avail, read_waker, read_paused: false,
+                       writer: Some(tokio_stream), write_avail, read_waker, read_paused: false,
                        rl_requested_disconnect: false,
                        id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
                })))
        }
 }
 
-fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
        match stream.peer_addr() {
-               Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+               Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
                        addr: sockaddr.ip().octets(),
                        port: sockaddr.port(),
                }),
-               Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+               Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
                        addr: sockaddr.ip().octets(),
                        port: sockaddr.port(),
                }),
@@ -462,9 +463,9 @@ impl SocketDescriptor {
 }
 impl peer_handler::SocketDescriptor for SocketDescriptor {
        fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
-               // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
-               // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
-               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
+               // To send data, we take a lock on our Connection to access the TcpStream, writing to it if
+               // there's room in the kernel buffer, or otherwise create a new Waker with a
+               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
                // processing future which will call write_buffer_space_avail and we'll end up back here.
                let mut us = self.conn.lock().unwrap();
                if us.writer.is_none() {
@@ -484,24 +485,18 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
                let mut ctx = task::Context::from_waker(&waker);
                let mut written_len = 0;
                loop {
-                       match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
-                               task::Poll::Ready(Ok(res)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(res, 0);
-                                       written_len += res;
-                                       if written_len == data.len() { return written_len; }
-                               },
-                               task::Poll::Ready(Err(e)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
-                                       // Probably we've already been closed, just return what we have and let the
-                                       // read thread handle closing logic.
-                                       return written_len;
+                       match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) {
+                               task::Poll::Ready(Ok(())) => {
+                                       match us.writer.as_ref().unwrap().try_write(&data[written_len..]) {
+                                               Ok(res) => {
+                                                       debug_assert_ne!(res, 0);
+                                                       written_len += res;
+                                                       if written_len == data.len() { return written_len; }
+                                               },
+                                               Err(_) => return written_len,
+                                       }
                                },
+                               task::Poll::Ready(Err(_)) => return written_len,
                                task::Poll::Pending => {
                                        // We're queued up for a write event now, but we need to make sure we also
                                        // pause read given we're now waiting on the remote end to ACK (and in
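The hunks above replace the `io::split` read/write halves with a single shared `Arc<TcpStream>`, moving from `AsyncReadExt::read` / `poll_write` to readiness polling plus non-blocking `try_read` / `try_write`; because `readable()` and `poll_write_ready()` may wake spuriously, `WouldBlock` must be treated as a retry rather than a disconnect. A minimal standalone sketch of that read pattern (a hypothetical helper, not the crate's actual loop, which also multiplexes the wake channels shown above):

    use std::sync::Arc;
    use tokio::net::TcpStream;

    // Wait until the socket reports readable, then attempt a non-blocking read.
    // A WouldBlock from try_read() just means the wakeup was spurious, so retry.
    async fn read_once(stream: &Arc<TcpStream>, buf: &mut [u8]) -> std::io::Result<usize> {
        loop {
            stream.readable().await?;
            match stream.try_read(buf) {
                Ok(n) => return Ok(n), // n == 0 indicates the peer closed the connection
                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
                Err(e) => return Err(e),
            }
        }
    }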
index 35bddc0774604fcd653e084946d06477d01ad239..361ab0c57a1d13fd9ee39543534c0bcbbbac4ccf 100644 (file)
@@ -3,9 +3,9 @@ name = "lightning-persister"
 version = "0.0.116"
 authors = ["Valentine Wallace", "Matt Corallo"]
 license = "MIT OR Apache-2.0"
-repository = "https://github.com/lightningdevkit/rust-lightning/"
+repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
-Utilities to manage Rust-Lightning channel data persistence and retrieval.
+Utilities for LDK data persistence and retrieval.
 """
 edition = "2018"
 
@@ -16,13 +16,13 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116", path = "../lightning" }
-libc = "0.2"
 
 [target.'cfg(windows)'.dependencies]
-winapi = { version = "0.3", features = ["winbase"] }
+windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
 
 [target.'cfg(ldk_bench)'.dependencies]
 criterion = { version = "0.4", optional = true, default-features = false }
 
 [dev-dependencies]
 lightning = { version = "0.0.116", path = "../lightning", features = ["_test_utils"] }
+bitcoin = { version = "0.29.0", default-features = false }
diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
new file mode 100644 (file)
index 0000000..56d071d
--- /dev/null
@@ -0,0 +1,531 @@
+//! Objects related to [`FilesystemStore`] live here.
+use crate::utils::{check_namespace_key_validity, is_valid_kvstore_str};
+
+use lightning::util::persist::KVStore;
+use lightning::util::string::PrintableString;
+
+use std::collections::HashMap;
+use std::fs;
+use std::io::{Read, Write};
+use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex, RwLock};
+
+#[cfg(target_os = "windows")]
+use {std::ffi::OsStr, std::os::windows::ffi::OsStrExt};
+
+#[cfg(target_os = "windows")]
+macro_rules! call {
+       ($e: expr) => {
+               if $e != 0 {
+                       Ok(())
+               } else {
+                       Err(std::io::Error::last_os_error())
+               }
+       };
+}
+
+#[cfg(target_os = "windows")]
+fn path_to_windows_str<T: AsRef<OsStr>>(path: T) -> Vec<u16> {
+       path.as_ref().encode_wide().chain(Some(0)).collect()
+}
+
+// The number of read/write/remove/list operations after which we clean up our `locks` HashMap.
+const GC_LOCK_INTERVAL: usize = 25;
+
+/// A [`KVStore`] implementation that writes to and reads from the file system.
+pub struct FilesystemStore {
+       data_dir: PathBuf,
+       tmp_file_counter: AtomicUsize,
+       gc_counter: AtomicUsize,
+       locks: Mutex<HashMap<PathBuf, Arc<RwLock<()>>>>,
+}
+
+impl FilesystemStore {
+       /// Constructs a new [`FilesystemStore`].
+       pub fn new(data_dir: PathBuf) -> Self {
+               let locks = Mutex::new(HashMap::new());
+               let tmp_file_counter = AtomicUsize::new(0);
+               let gc_counter = AtomicUsize::new(1);
+               Self { data_dir, tmp_file_counter, gc_counter, locks }
+       }
+
+       /// Returns the data directory.
+       pub fn get_data_dir(&self) -> PathBuf {
+               self.data_dir.clone()
+       }
+
+       fn garbage_collect_locks(&self) {
+               let gc_counter = self.gc_counter.fetch_add(1, Ordering::AcqRel);
+
+               if gc_counter % GC_LOCK_INTERVAL == 0 {
+                       // Take outer lock for the cleanup.
+                       let mut outer_lock = self.locks.lock().unwrap();
+
+                       // Garbage collect all lock entries that are not referenced anymore.
+                       outer_lock.retain(|_, v| Arc::strong_count(&v) > 1);
+               }
+       }
+
+       fn get_dest_dir_path(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<PathBuf> {
+               let mut dest_dir_path = {
+                       #[cfg(target_os = "windows")]
+                       {
+                               let data_dir = self.data_dir.clone();
+                               fs::create_dir_all(data_dir.clone())?;
+                               fs::canonicalize(data_dir)?
+                       }
+                       #[cfg(not(target_os = "windows"))]
+                       {
+                               self.data_dir.clone()
+                       }
+               };
+
+               dest_dir_path.push(namespace);
+               if !sub_namespace.is_empty() {
+                       dest_dir_path.push(sub_namespace);
+               }
+
+               Ok(dest_dir_path)
+       }
+}
+
+impl KVStore for FilesystemStore {
+       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> std::io::Result<Vec<u8>> {
+               check_namespace_key_validity(namespace, sub_namespace, Some(key), "read")?;
+
+               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               dest_file_path.push(key);
+
+               let mut buf = Vec::new();
+               {
+                       let inner_lock_ref = {
+                               let mut outer_lock = self.locks.lock().unwrap();
+                               Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+                       };
+                       let _guard = inner_lock_ref.read().unwrap();
+
+                       let mut f = fs::File::open(dest_file_path)?;
+                       f.read_to_end(&mut buf)?;
+               }
+
+               self.garbage_collect_locks();
+
+               Ok(buf)
+       }
+
+       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> std::io::Result<()> {
+               check_namespace_key_validity(namespace, sub_namespace, Some(key), "write")?;
+
+               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               dest_file_path.push(key);
+
+               let parent_directory = dest_file_path
+                       .parent()
+                       .ok_or_else(|| {
+                               let msg =
+                                       format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+                               std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
+                       })?;
+               fs::create_dir_all(&parent_directory)?;
+
+               // Do a crazy dance with lots of fsync()s to be overly cautious here...
+               // We never want to end up in a state where we've lost the old data, or end up using the
+               // old data on power loss after we've returned.
+               // The way to atomically write a file on Unix platforms is:
+               // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
+               let mut tmp_file_path = dest_file_path.clone();
+               let tmp_file_ext = format!("{}.tmp", self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+               tmp_file_path.set_extension(tmp_file_ext);
+
+               {
+                       let mut tmp_file = fs::File::create(&tmp_file_path)?;
+                       tmp_file.write_all(&buf)?;
+                       tmp_file.sync_all()?;
+               }
+
+               let res = {
+                       let inner_lock_ref = {
+                               let mut outer_lock = self.locks.lock().unwrap();
+                               Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+                       };
+                       let _guard = inner_lock_ref.write().unwrap();
+
+                       #[cfg(not(target_os = "windows"))]
+                       {
+                               fs::rename(&tmp_file_path, &dest_file_path)?;
+                               let dir_file = fs::OpenOptions::new().read(true).open(&parent_directory)?;
+                               dir_file.sync_all()?;
+                               Ok(())
+                       }
+
+                       #[cfg(target_os = "windows")]
+                       {
+                               let res = if dest_file_path.exists() {
+                                       call!(unsafe {
+                                               windows_sys::Win32::Storage::FileSystem::ReplaceFileW(
+                                                       path_to_windows_str(dest_file_path.clone()).as_ptr(),
+                                                       path_to_windows_str(tmp_file_path).as_ptr(),
+                                                       std::ptr::null(),
+                                                       windows_sys::Win32::Storage::FileSystem::REPLACEFILE_IGNORE_MERGE_ERRORS,
+                                                       std::ptr::null_mut() as *const core::ffi::c_void,
+                                                       std::ptr::null_mut() as *const core::ffi::c_void,
+                                                       )
+                                       })
+                               } else {
+                                       call!(unsafe {
+                                               windows_sys::Win32::Storage::FileSystem::MoveFileExW(
+                                                       path_to_windows_str(tmp_file_path).as_ptr(),
+                                                       path_to_windows_str(dest_file_path.clone()).as_ptr(),
+                                                       windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
+                                                       | windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
+                                                       )
+                                       })
+                               };
+
+                               match res {
+                                       Ok(()) => {
+                                               // We fsync the dest file in hopes this will also flush the metadata to disk.
+                                               let dest_file = fs::OpenOptions::new().read(true).write(true)
+                                                       .open(&dest_file_path)?;
+                                               dest_file.sync_all()?;
+                                               Ok(())
+                                       }
+                                       Err(e) => Err(e),
+                               }
+                       }
+               };
+
+               self.garbage_collect_locks();
+
+               res
+       }
+
+       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> std::io::Result<()> {
+               check_namespace_key_validity(namespace, sub_namespace, Some(key), "remove")?;
+
+               let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+               dest_file_path.push(key);
+
+               if !dest_file_path.is_file() {
+                       return Ok(());
+               }
+
+               {
+                       let inner_lock_ref = {
+                               let mut outer_lock = self.locks.lock().unwrap();
+                               Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+                       };
+                       let _guard = inner_lock_ref.write().unwrap();
+
+                       if lazy {
+                               // If we're lazy we just call remove and be done with it.
+                               fs::remove_file(&dest_file_path)?;
+                       } else {
+                               // If we're not lazy we try our best to persist the updated metadata to ensure
+                               // atomicity of this call.
+                               #[cfg(not(target_os = "windows"))]
+                               {
+                                       fs::remove_file(&dest_file_path)?;
+
+                                       let parent_directory = dest_file_path.parent().ok_or_else(|| {
+                                               let msg =
+                                                       format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+                                               std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
+                                       })?;
+                                       let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
+                                       // The above call to `fs::remove_file` corresponds to POSIX `unlink`, whose changes
+                                       // to the inode might get cached (and hence possibly lost on crash), depending on
+                                       // the target platform and file system.
+                                       //
+                                       // In order to assert we permanently removed the file in question we therefore
+                                       // call `fsync` on the parent directory on platforms that support it.
+                                       dir_file.sync_all()?;
+                               }
+
+                               #[cfg(target_os = "windows")]
+                               {
+                                       // Since the effects of the Windows `DeleteFile` API may not be persisted until the
+                                       // last open file handle is dropped, and there seemingly is no reliable way to flush
+                                       // the directory metadata, we fall back to a 'recycling bin' model here, i.e., we
+                                       // first move the file to be deleted to a temporary trash file and remove that file
+                                       // afterwards.
+                                       //
+                                       // This should be marginally better, as, according to the documentation,
+                                       // `MoveFileExW` APIs should offer stronger persistence guarantees,
+                                       // at least if `MOVEFILE_WRITE_THROUGH`/`MOVEFILE_REPLACE_EXISTING` is set.
+                                       // However, all this is partially based on assumptions and local experiments, as
+                                       // Windows API is horribly underdocumented.
+                                       let mut trash_file_path = dest_file_path.clone();
+                                       let trash_file_ext = format!("{}.trash",
+                                               self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+                                       trash_file_path.set_extension(trash_file_ext);
+
+                                       call!(unsafe {
+                                               windows_sys::Win32::Storage::FileSystem::MoveFileExW(
+                                                       path_to_windows_str(dest_file_path).as_ptr(),
+                                                       path_to_windows_str(trash_file_path.clone()).as_ptr(),
+                                                       windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
+                                                       | windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
+                                                       )
+                                       })?;
+
+                                       {
+                                               // We fsync the trash file in hopes this will also flush the original's file
+                                               // metadata to disk.
+                                               let trash_file = fs::OpenOptions::new().read(true).write(true)
+                                                       .open(&trash_file_path.clone())?;
+                                               trash_file.sync_all()?;
+                                       }
+
+                                       // We're fine if this remove would fail as the trash file will be cleaned up in
+                                       // list eventually.
+                                       fs::remove_file(trash_file_path).ok();
+                               }
+                       }
+               }
+
+               self.garbage_collect_locks();
+
+               Ok(())
+       }
+
+       fn list(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<Vec<String>> {
+               check_namespace_key_validity(namespace, sub_namespace, None, "list")?;
+
+               let prefixed_dest = self.get_dest_dir_path(namespace, sub_namespace)?;
+               let mut keys = Vec::new();
+
+               if !Path::new(&prefixed_dest).exists() {
+                       return Ok(Vec::new());
+               }
+
+               for entry in fs::read_dir(&prefixed_dest)? {
+                       let entry = entry?;
+                       let p = entry.path();
+
+                       if let Some(ext) = p.extension() {
+                               #[cfg(target_os = "windows")]
+                               {
+                                       // Clean up any trash files lying around.
+                                       if ext == "trash" {
+                                               fs::remove_file(p).ok();
+                                               continue;
+                                       }
+                               }
+                               if ext == "tmp" {
+                                       continue;
+                               }
+                       }
+
+                       let metadata = p.metadata()?;
+
+                       // We allow the presence of directories in the empty namespace and just skip them.
+                       if metadata.is_dir() {
+                               continue;
+                       }
+
+                       // If we otherwise don't find a file at the given path something went wrong.
+                       if !metadata.is_file() {
+                               debug_assert!(false, "Failed to list keys of {}/{}: file couldn't be accessed.",
+                                       PrintableString(namespace), PrintableString(sub_namespace));
+                               let msg = format!("Failed to list keys of {}/{}: file couldn't be accessed.",
+                                       PrintableString(namespace), PrintableString(sub_namespace));
+                               return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+                       }
+
+                       match p.strip_prefix(&prefixed_dest) {
+                               Ok(stripped_path) => {
+                                       if let Some(relative_path) = stripped_path.to_str() {
+                                               if is_valid_kvstore_str(relative_path) {
+                                                       keys.push(relative_path.to_string())
+                                               }
+                                       } else {
+                                               debug_assert!(false, "Failed to list keys of {}/{}: file path is not valid UTF-8",
+                                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                               let msg = format!("Failed to list keys of {}/{}: file path is not valid UTF-8",
+                                                       PrintableString(namespace), PrintableString(sub_namespace));
+                                               return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+                                       }
+                               }
+                               Err(e) => {
+                                       debug_assert!(false, "Failed to list keys of {}/{}: {}",
+                                               PrintableString(namespace), PrintableString(sub_namespace), e);
+                                       let msg = format!("Failed to list keys of {}/{}: {}",
+                                               PrintableString(namespace), PrintableString(sub_namespace), e);
+                                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+                               }
+                       }
+               }
+
+               self.garbage_collect_locks();
+
+               Ok(keys)
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use super::*;
+       use crate::test_utils::{do_read_write_remove_list_persist, do_test_store};
+
+       use bitcoin::hashes::hex::FromHex;
+       use bitcoin::Txid;
+
+       use lightning::chain::ChannelMonitorUpdateStatus;
+       use lightning::chain::chainmonitor::Persist;
+       use lightning::chain::transaction::OutPoint;
+       use lightning::check_closed_event;
+       use lightning::events::{ClosureReason, MessageSendEventsProvider};
+       use lightning::ln::functional_test_utils::*;
+       use lightning::util::test_utils;
+       use lightning::util::persist::read_channel_monitors;
+       use std::fs;
+       #[cfg(target_os = "windows")]
+       use {
+               lightning::get_event_msg,
+               lightning::ln::msgs::ChannelMessageHandler,
+       };
+
+       impl Drop for FilesystemStore {
+               fn drop(&mut self) {
+                       // We test for invalid directory names, so it's OK if directory removal
+                       // fails.
+                       match fs::remove_dir_all(&self.data_dir) {
+                               Err(e) => println!("Failed to remove test persister directory: {}", e),
+                               _ => {}
+                       }
+               }
+       }
+
+       #[test]
+       fn read_write_remove_list_persist() {
+               let mut temp_path = std::env::temp_dir();
+               temp_path.push("test_read_write_remove_list_persist");
+               let fs_store = FilesystemStore::new(temp_path);
+               do_read_write_remove_list_persist(&fs_store);
+       }
+
+       #[test]
+       fn test_if_monitors_is_not_dir() {
+               let store = FilesystemStore::new("test_monitors_is_not_dir".into());
+
+               fs::create_dir_all(&store.get_data_dir()).unwrap();
+               let mut path = std::path::PathBuf::from(&store.get_data_dir());
+               path.push("monitors");
+               fs::File::create(path).unwrap();
+
+               let chanmon_cfgs = create_chanmon_cfgs(1);
+               let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
+               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &store, node_cfgs[0].keys_manager);
+               node_cfgs[0].chain_monitor = chain_mon_0;
+               let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
+               let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
+
+               // Check that read_channel_monitors() returns error if monitors/ is not a
+               // directory.
+               assert!(read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err());
+       }
+
+       #[test]
+       fn test_filesystem_store() {
+               // Create the nodes, giving them FilesystemStores for data stores.
+               let store_0 = FilesystemStore::new("test_filesystem_store_0".into());
+               let store_1 = FilesystemStore::new("test_filesystem_store_1".into());
+               do_test_store(&store_0, &store_1)
+       }
+
+       // Test that if the store's path to channel data is read-only, writing a
+       // monitor to it results in the store returning a PermanentFailure.
+       // Windows ignores the read-only flag for folders, so this test is Unix-only.
+       #[cfg(not(target_os = "windows"))]
+       #[test]
+       fn test_readonly_dir_perm_failure() {
+               let store = FilesystemStore::new("test_readonly_dir_perm_failure".into());
+               fs::create_dir_all(&store.get_data_dir()).unwrap();
+
+               // Set up a dummy channel and force close. This will produce a monitor
+               // that we can then use to test persistence.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+
+               // Set the store's directory to read-only, which should result in
+               // returning a permanent failure when we then attempt to persist a
+               // channel update.
+               let path = &store.get_data_dir();
+               let mut perms = fs::metadata(path).unwrap().permissions();
+               perms.set_readonly(true);
+               fs::set_permissions(path, perms).unwrap();
+
+               let test_txo = OutPoint {
+                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
+                       index: 0
+               };
+               match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
+                       _ => panic!("unexpected result from persisting new channel")
+               }
+
+               nodes[1].node.get_and_clear_pending_msg_events();
+               added_monitors.clear();
+       }
+
+       // Test that if a store's directory name is invalid, monitor persistence
+       // will fail.
+       #[cfg(target_os = "windows")]
+       #[test]
+       fn test_fail_on_open() {
+               // Set up a dummy channel and force close. This will produce a monitor
+               // that we can then use to test persistence.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+
+               // Create the store with an invalid directory name and test that the
+               // channel fails to open because the directories fail to be created. There
+               // don't seem to be invalid filename characters on Unix that Rust doesn't
+               // handle, hence why the test is Windows-only.
+               let store = FilesystemStore::new(":<>/".into());
+
+               let test_txo = OutPoint {
+                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
+                       index: 0
+               };
+               match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+                       ChannelMonitorUpdateStatus::PermanentFailure => {},
+                       _ => panic!("unexpected result from persisting new channel")
+               }
+
+               nodes[1].node.get_and_clear_pending_msg_events();
+               added_monitors.clear();
+       }
+}
+
+#[cfg(ldk_bench)]
+/// Benches
+pub mod bench {
+       use criterion::Criterion;
+
+       /// Bench!
+       pub fn bench_sends(bench: &mut Criterion) {
+               let store_a = super::FilesystemStore::new("bench_filesystem_store_a".into());
+               let store_b = super::FilesystemStore::new("bench_filesystem_store_b".into());
+               lightning::ln::channelmanager::bench::bench_two_sends(
+                       bench, "bench_filesystem_persisted_sends", store_a, store_b);
+       }
+}
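A hedged usage sketch of the new `FilesystemStore` through the `KVStore` trait implemented above; the function name, data-dir path, namespace, and key strings are illustrative only:

    use lightning::util::persist::KVStore;
    use lightning_persister::fs_store::FilesystemStore;

    // Sketch only: round-trip a value through the store. `remove(.., lazy = false)`
    // additionally fsyncs the parent directory on non-Windows, per the impl above.
    fn kvstore_round_trip() -> std::io::Result<()> {
        let store = FilesystemStore::new("/tmp/ldk_store_example".into());
        store.write("monitors", "", "deadbeef_0", &[1, 2, 3, 4])?;
        assert_eq!(store.read("monitors", "", "deadbeef_0")?, vec![1, 2, 3, 4]);
        assert_eq!(store.list("monitors", "")?, vec!["deadbeef_0".to_string()]);
        store.remove("monitors", "", "deadbeef_0", false)?;
        Ok(())
    }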
index b34fe895b47e7df0a707db63b2225a1120de523b..ae258e137d742f32ce1067d6508ad133e02bd67a 100644 (file)
@@ -1,6 +1,6 @@
-//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
-
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+//! Provides utilities for LDK data persistence and retrieval.
+//
+// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
 #![deny(broken_intra_doc_links)]
 #![deny(private_intra_doc_links)]
 
 
 #[cfg(ldk_bench)] extern crate criterion;
 
-mod util;
-
-extern crate lightning;
-extern crate bitcoin;
-extern crate libc;
-
-use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::hashes::hex::FromHex;
-use lightning::chain::channelmonitor::ChannelMonitor;
-use lightning::sign::{EntropySource, SignerProvider};
-use lightning::util::ser::{ReadableArgs, Writeable};
-use lightning::util::persist::KVStorePersister;
-use std::fs;
-use std::io::Cursor;
-use std::ops::Deref;
-use std::path::{Path, PathBuf};
-
-/// FilesystemPersister persists channel data on disk, where each channel's
-/// data is stored in a file named after its funding outpoint.
-///
-/// Warning: this module does the best it can with calls to persist data, but it
-/// can only guarantee that the data is passed to the drive. It is up to the
-/// drive manufacturers to do the actual persistence properly, which they often
-/// don't (especially on consumer-grade hardware). Therefore, it is up to the
-/// user to validate their entire storage stack, to ensure the writes are
-/// persistent.
-/// Corollary: especially when dealing with larger amounts of money, it is best
-/// practice to have multiple channel data backups and not rely only on one
-/// FilesystemPersister.
-pub struct FilesystemPersister {
-       path_to_channel_data: String,
-}
-
-impl FilesystemPersister {
-       /// Initialize a new FilesystemPersister and set the path to the individual channels'
-       /// files.
-       pub fn new(path_to_channel_data: String) -> Self {
-               Self {
-                       path_to_channel_data,
-               }
-       }
-
-       /// Get the directory which was provided when this persister was initialized.
-       pub fn get_data_dir(&self) -> String {
-               self.path_to_channel_data.clone()
-       }
-
-       /// Read `ChannelMonitor`s from disk.
-       pub fn read_channelmonitors<ES: Deref, SP: Deref> (
-               &self, entropy_source: ES, signer_provider: SP
-       ) -> std::io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
-               where
-                       ES::Target: EntropySource + Sized,
-                       SP::Target: SignerProvider + Sized
-       {
-               let mut path = PathBuf::from(&self.path_to_channel_data);
-               path.push("monitors");
-               if !Path::new(&path).exists() {
-                       return Ok(Vec::new());
-               }
-               let mut res = Vec::new();
-               for file_option in fs::read_dir(path)? {
-                       let file = file_option.unwrap();
-                       let owned_file_name = file.file_name();
-                       let filename = owned_file_name.to_str()
-                               .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidData,
-                                       "File name is not a valid utf8 string"))?;
-                       if !filename.is_ascii() || filename.len() < 65 {
-                               return Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid ChannelMonitor file name",
-                               ));
-                       }
-                       if filename.ends_with(".tmp") {
-                               // If we were in the middle of committing an new update and crashed, it should be
-                               // safe to ignore the update - we should never have returned to the caller and
-                               // irrevocably committed to the new state in any way.
-                               continue;
-                       }
-
-                       let txid: Txid = Txid::from_hex(filename.split_at(64).0)
-                               .map_err(|_| std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid tx ID in filename",
-                               ))?;
-
-                       let index: u16 = filename.split_at(65).1.parse()
-                               .map_err(|_| std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid tx index in filename",
-                               ))?;
+pub mod fs_store;
 
-                       let contents = fs::read(&file.path())?;
-                       let mut buffer = Cursor::new(&contents);
-                       match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(&mut buffer, (&*entropy_source, &*signer_provider)) {
-                               Ok((blockhash, channel_monitor)) => {
-                                       if channel_monitor.get_funding_txo().0.txid != txid || channel_monitor.get_funding_txo().0.index != index {
-                                               return Err(std::io::Error::new(std::io::ErrorKind::InvalidData,
-                                                                              "ChannelMonitor was stored in the wrong file"));
-                                       }
-                                       res.push((blockhash, channel_monitor));
-                               }
-                               Err(e) => return Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       format!("Failed to deserialize ChannelMonitor: {}", e),
-                               ))
-                       }
-               }
-               Ok(res)
-       }
-}
-
-impl KVStorePersister for FilesystemPersister {
-       fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
-               let mut dest_file = PathBuf::from(self.path_to_channel_data.clone());
-               dest_file.push(key);
-               util::write_to_file(dest_file, object)
-       }
-}
+mod utils;
 
 #[cfg(test)]
-mod tests {
-       extern crate lightning;
-       extern crate bitcoin;
-       use crate::FilesystemPersister;
-       use bitcoin::hashes::hex::FromHex;
-       use bitcoin::Txid;
-       use lightning::chain::ChannelMonitorUpdateStatus;
-       use lightning::chain::chainmonitor::Persist;
-       use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
-       use lightning::chain::transaction::OutPoint;
-       use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
-       use lightning::events::{ClosureReason, MessageSendEventsProvider};
-       use lightning::ln::functional_test_utils::*;
-       use lightning::util::test_utils;
-       use std::fs;
-       #[cfg(target_os = "windows")]
-       use {
-               lightning::get_event_msg,
-               lightning::ln::msgs::ChannelMessageHandler,
-       };
-
-       impl Drop for FilesystemPersister {
-               fn drop(&mut self) {
-                       // We test for invalid directory names, so it's OK if directory removal
-                       // fails.
-                       match fs::remove_dir_all(&self.path_to_channel_data) {
-                               Err(e) => println!("Failed to remove test persister directory: {}", e),
-                               _ => {}
-                       }
-               }
-       }
-
-       #[test]
-       fn test_if_monitors_is_not_dir() {
-               let persister = FilesystemPersister::new("test_monitors_is_not_dir".to_string());
-
-               fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-               let mut path = std::path::PathBuf::from(&persister.path_to_channel_data);
-               path.push("monitors");
-               fs::File::create(path).unwrap();
-
-               let chanmon_cfgs = create_chanmon_cfgs(1);
-               let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
-               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, node_cfgs[0].keys_manager);
-               node_cfgs[0].chain_monitor = chain_mon_0;
-               let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
-               let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
-
-               // Check that read_channelmonitors() returns error if monitors/ is not a
-               // directory.
-               assert!(persister.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).is_err());
-       }
-
-       // Integration-test the FilesystemPersister. Test relaying a few payments
-       // and check that the persisted data is updated the appropriate number of
-       // times.
-       #[test]
-       fn test_filesystem_persister() {
-               // Create the nodes, giving them FilesystemPersisters for data persisters.
-               let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
-               let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, node_cfgs[0].keys_manager);
-               let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, node_cfgs[1].keys_manager);
-               node_cfgs[0].chain_monitor = chain_mon_0;
-               node_cfgs[1].chain_monitor = chain_mon_1;
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-               // Check that the persisted channel data is empty before any channels are
-               // open.
-               let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_0.len(), 0);
-               let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_1.len(), 0);
-
-               // Helper to make sure the channel is on the expected update ID.
-               macro_rules! check_persisted_data {
-                       ($expected_update_id: expr) => {
-                               persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_0.len(), 1);
-                               for (_, mon) in persisted_chan_data_0.iter() {
-                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                               }
-                               persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_1.len(), 1);
-                               for (_, mon) in persisted_chan_data_1.iter() {
-                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                               }
-                       }
-               }
-
-               // Create some initial channel and check that a channel was persisted.
-               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
-               check_persisted_data!(0);
-
-               // Send a few payments and make sure the monitors are updated to the latest.
-               send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-               check_persisted_data!(5);
-               send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
-               check_persisted_data!(10);
-
-               // Force close because cooperative close doesn't result in any persisted
-               // updates.
-               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
-               check_closed_broadcast!(nodes[0], true);
-               check_added_monitors!(nodes[0], 1);
-
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 1);
-
-               connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
-               check_closed_broadcast!(nodes[1], true);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
-               check_added_monitors!(nodes[1], 1);
-
-               // Make sure everything is persisted as expected after close.
-               check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // Test that if the persister's path to channel data is read-only, writing a
-       // monitor to it results in the persister returning a PermanentFailure.
-       // Windows ignores the read-only flag for folders, so this test is Unix-only.
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_readonly_dir_perm_failure() {
-               let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
-               fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-
-               // Set up a dummy channel and force close. This will produce a monitor
-               // that we can then use to test persistence.
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
-               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
-               // Set the persister's directory to read-only, which should result in
-               // returning a permanent failure when we then attempt to persist a
-               // channel update.
-               let path = &persister.path_to_channel_data;
-               let mut perms = fs::metadata(path).unwrap().permissions();
-               perms.set_readonly(true);
-               fs::set_permissions(path, perms).unwrap();
-
-               let test_txo = OutPoint {
-                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-                       index: 0
-               };
-               match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {},
-                       _ => panic!("unexpected result from persisting new channel")
-               }
-
-               nodes[1].node.get_and_clear_pending_msg_events();
-               added_monitors.clear();
-       }
-
-       // Test that if a persister's directory name is invalid, monitor persistence
-       // will fail.
-       #[cfg(target_os = "windows")]
-       #[test]
-       fn test_fail_on_open() {
-               // Set up a dummy channel and force close. This will produce a monitor
-               // that we can then use to test persistence.
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
-               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
-               // Create the persister with an invalid directory name and test that the
-               // channel fails to open because the directories fail to be created. There
-               // don't seem to be invalid filename characters on Unix that Rust doesn't
-               // handle, hence why the test is Windows-only.
-               let persister = FilesystemPersister::new(":<>/".to_string());
-
-               let test_txo = OutPoint {
-                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-                       index: 0
-               };
-               match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {},
-                       _ => panic!("unexpected result from persisting new channel")
-               }
-
-               nodes[1].node.get_and_clear_pending_msg_events();
-               added_monitors.clear();
-       }
-}
-
-#[cfg(ldk_bench)]
-/// Benches
-pub mod bench {
-       use criterion::Criterion;
-
-       /// Bench!
-       pub fn bench_sends(bench: &mut Criterion) {
-               let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
-               let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
-               lightning::ln::channelmanager::bench::bench_two_sends(
-                       bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
-       }
-}
+mod test_utils;
diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs
new file mode 100644 (file)
index 0000000..9155750
--- /dev/null
@@ -0,0 +1,121 @@
+use lightning::util::persist::{KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN, read_channel_monitors};
+use lightning::ln::functional_test_utils::{connect_block, create_announced_chan_between_nodes,
+       create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs,
+       send_payment};
+use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
+use lightning::util::test_utils;
+use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
+use lightning::events::ClosureReason;
+
+use std::panic::RefUnwindSafe;
+
+pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_store: &K) {
+       let data = [42u8; 32];
+
+       let namespace = "testspace";
+       let sub_namespace = "testsubspace";
+       let key = "testkey";
+
+       // Test the basic KVStore operations.
+       kv_store.write(namespace, sub_namespace, key, &data).unwrap();
+
+       // Test that an empty namespace/sub-namespace pair is allowed, but that an empty namespace with
+       // a non-empty sub-namespace is rejected, as is an empty key.
+       kv_store.write("", "", key, &data).unwrap();
+       let res = std::panic::catch_unwind(|| kv_store.write("", sub_namespace, key, &data));
+       assert!(res.is_err());
+       let res = std::panic::catch_unwind(|| kv_store.write(namespace, sub_namespace, "", &data));
+       assert!(res.is_err());
+
+       let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+       assert_eq!(listed_keys.len(), 1);
+       assert_eq!(listed_keys[0], key);
+
+       let read_data = kv_store.read(namespace, sub_namespace, key).unwrap();
+       assert_eq!(data, &*read_data);
+
+       kv_store.remove(namespace, sub_namespace, key, false).unwrap();
+
+       let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+       assert_eq!(listed_keys.len(), 0);
+
+       // Ensure we have no issue operating with a namespace/sub-namespace/key of length KVSTORE_NAMESPACE_KEY_MAX_LEN.
+       let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect();
+       kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap();
+
+       let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
+       assert_eq!(listed_keys.len(), 1);
+       assert_eq!(listed_keys[0], max_chars);
+
+       let read_data = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap();
+       assert_eq!(data, &*read_data);
+
+       kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap();
+
+       let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
+       assert_eq!(listed_keys.len(), 0);
+}
+
+// Integration-test the given KVStore implementation. Test relaying a few payments and check that
+// the persisted data is updated the appropriate number of times.
+pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, store_0, node_cfgs[0].keys_manager);
+       let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, store_1, node_cfgs[1].keys_manager);
+       node_cfgs[0].chain_monitor = chain_mon_0;
+       node_cfgs[1].chain_monitor = chain_mon_1;
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Check that the persisted channel data is empty before any channels are
+       // open.
+       let mut persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+       assert_eq!(persisted_chan_data_0.len(), 0);
+       let mut persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+       assert_eq!(persisted_chan_data_1.len(), 0);
+
+       // Helper to make sure the channel is at the expected update ID.
+       macro_rules! check_persisted_data {
+               ($expected_update_id: expr) => {
+                       persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+                       assert_eq!(persisted_chan_data_0.len(), 1);
+                       for (_, mon) in persisted_chan_data_0.iter() {
+                               assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+                       }
+                       persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+                       assert_eq!(persisted_chan_data_1.len(), 1);
+                       for (_, mon) in persisted_chan_data_1.iter() {
+                               assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+                       }
+               }
+       }
+
+       // Create an initial channel and check that it was persisted.
+       let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+       check_persisted_data!(0);
+
+       // Send a few payments and make sure the monitors are updated to the latest.
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       check_persisted_data!(5);
+       send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
+       check_persisted_data!(10);
+
+       // Force close because cooperative close doesn't result in any persisted
+       // updates.
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_broadcast!(nodes[0], true);
+       check_added_monitors!(nodes[0], 1);
+
+       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       assert_eq!(node_txn.len(), 1);
+
+       connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
+       check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+       check_added_monitors!(nodes[1], 1);
+
+       // Make sure everything is persisted as expected after close.
+       check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
+}
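
As a usage note, a concrete `KVStore` backend can drive both helpers from its own test module. The sketch below is illustrative only; `MyStore` and its constructor are hypothetical, while the two helper functions are the ones defined above.

#[cfg(test)]
mod tests {
	use super::MyStore; // hypothetical KVStore implementation under test
	use crate::test_utils::{do_read_write_remove_list_persist, do_test_store};

	#[test]
	fn read_write_remove_list_persist() {
		// Exercises write/read/list/remove plus the key-validity edge cases.
		let store = MyStore::new("my_store_rwrl".to_string());
		do_read_write_remove_list_persist(&store);
	}

	#[test]
	fn test_store() {
		// Relays a few payments between two nodes and checks monitor persistence.
		let store_0 = MyStore::new("my_store_0".to_string());
		let store_1 = MyStore::new("my_store_1".to_string());
		do_test_store(&store_0, &store_1);
	}
}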
diff --git a/lightning-persister/src/util.rs b/lightning-persister/src/util.rs
deleted file mode 100644 (file)
index 20c4a81..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-#[cfg(target_os = "windows")]
-extern crate winapi;
-
-use std::fs;
-use std::path::PathBuf;
-use std::io::BufWriter;
-
-#[cfg(not(target_os = "windows"))]
-use std::os::unix::io::AsRawFd;
-
-use lightning::util::ser::Writeable;
-
-#[cfg(target_os = "windows")]
-use {
-       std::ffi::OsStr,
-       std::os::windows::ffi::OsStrExt
-};
-
-#[cfg(target_os = "windows")]
-macro_rules! call {
-       ($e: expr) => (
-               if $e != 0 {
-                       return Ok(())
-               } else {
-                       return Err(std::io::Error::last_os_error())
-               }
-       )
-}
-
-#[cfg(target_os = "windows")]
-fn path_to_windows_str<T: AsRef<OsStr>>(path: T) -> Vec<winapi::shared::ntdef::WCHAR> {
-       path.as_ref().encode_wide().chain(Some(0)).collect()
-}
-
-#[allow(bare_trait_objects)]
-pub(crate) fn write_to_file<W: Writeable>(dest_file: PathBuf, data: &W) -> std::io::Result<()> {
-       let mut tmp_file = dest_file.clone();
-       tmp_file.set_extension("tmp");
-
-       let parent_directory = dest_file.parent().unwrap();
-       fs::create_dir_all(parent_directory)?;
-       // Do a crazy dance with lots of fsync()s to be overly cautious here...
-       // We never want to end up in a state where we've lost the old data, or end up using the
-       // old data on power loss after we've returned.
-       // The way to atomically write a file on Unix platforms is:
-       // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
-       {
-               // Note that going by rust-lang/rust@d602a6b, on MacOS it is only safe to use
-               // rust stdlib 1.36 or higher.
-               let mut buf = BufWriter::new(fs::File::create(&tmp_file)?);
-               data.write(&mut buf)?;
-               buf.into_inner()?.sync_all()?;
-       }
-       // Fsync the parent directory on Unix.
-       #[cfg(not(target_os = "windows"))]
-       {
-               fs::rename(&tmp_file, &dest_file)?;
-               let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
-               unsafe { libc::fsync(dir_file.as_raw_fd()); }
-       }
-       #[cfg(target_os = "windows")]
-       {
-               if dest_file.exists() {
-                       unsafe {winapi::um::winbase::ReplaceFileW(
-                               path_to_windows_str(dest_file).as_ptr(), path_to_windows_str(tmp_file).as_ptr(), std::ptr::null(),
-                               winapi::um::winbase::REPLACEFILE_IGNORE_MERGE_ERRORS,
-                               std::ptr::null_mut() as *mut winapi::ctypes::c_void,
-                               std::ptr::null_mut() as *mut winapi::ctypes::c_void
-                       )};
-               } else {
-                       call!(unsafe {winapi::um::winbase::MoveFileExW(
-                               path_to_windows_str(tmp_file).as_ptr(), path_to_windows_str(dest_file).as_ptr(),
-                               winapi::um::winbase::MOVEFILE_WRITE_THROUGH | winapi::um::winbase::MOVEFILE_REPLACE_EXISTING
-                       )});
-               }
-       }
-       Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-       use lightning::util::ser::{Writer, Writeable};
-
-       use super::{write_to_file};
-       use std::fs;
-       use std::io;
-       use std::path::PathBuf;
-
-       struct TestWriteable{}
-       impl Writeable for TestWriteable {
-               fn write<W: Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> {
-                       writer.write_all(&[42; 1])
-               }
-       }
-
-       // Test that if the persister's path to channel data is read-only, writing
-       // data to it fails. Windows ignores the read-only flag for folders, so this
-       // test is Unix-only.
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_readonly_dir() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_readonly_dir_persister_filename".to_string();
-               let path = "test_readonly_dir_persister_dir";
-               fs::create_dir_all(path).unwrap();
-               let mut perms = fs::metadata(path).unwrap().permissions();
-               perms.set_readonly(true);
-               fs::set_permissions(path, perms).unwrap();
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => assert_eq!(e.kind(), io::ErrorKind::PermissionDenied),
-                       _ => panic!("Unexpected error message")
-               }
-       }
-
-       // Test failure to rename in the process of atomically creating a channel
-       // monitor's file. We induce this failure by making the `tmp` file a
-       // directory.
-       // Explanation: given "from" = the file being renamed, "to" = the destination
-       // file that already exists: Unix should fail because if "from" is a file,
-       // then "to" is also required to be a file.
-       // TODO: ideally try to make this work on Windows again
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_rename_failure() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_rename_failure_filename";
-               let path = "test_rename_failure_dir";
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               // Create the channel data file and make it a directory.
-               fs::create_dir_all(dest_file.clone()).unwrap();
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => assert_eq!(e.raw_os_error(), Some(libc::EISDIR)),
-                       _ => panic!("Unexpected Ok(())")
-               }
-               fs::remove_dir_all(path).unwrap();
-       }
-
-       #[test]
-       fn test_diskwriteable_failure() {
-               struct FailingWriteable {}
-               impl Writeable for FailingWriteable {
-                       fn write<W: Writer>(&self, _writer: &mut W) -> Result<(), std::io::Error> {
-                               Err(std::io::Error::new(std::io::ErrorKind::Other, "expected failure"))
-                       }
-               }
-
-               let filename = "test_diskwriteable_failure";
-               let path = "test_diskwriteable_failure_dir";
-               let test_writeable = FailingWriteable{};
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => {
-                               assert_eq!(e.kind(), std::io::ErrorKind::Other);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "expected failure");
-                       },
-                       _ => panic!("unexpected result")
-               }
-               fs::remove_dir_all(path).unwrap();
-       }
-
-       // Test failure to create the temporary file in the persistence process.
-       // We induce this failure by having the temp file already exist and be a
-       // directory.
-       #[test]
-       fn test_tmp_file_creation_failure() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_tmp_file_creation_failure_filename".to_string();
-               let path = "test_tmp_file_creation_failure_dir";
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               let mut tmp_file = dest_file.clone();
-               tmp_file.set_extension("tmp");
-               fs::create_dir_all(tmp_file).unwrap();
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => {
-                               #[cfg(not(target_os = "windows"))]
-                               assert_eq!(e.raw_os_error(), Some(libc::EISDIR));
-                               #[cfg(target_os = "windows")]
-                               assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
-                       }
-                       _ => panic!("Unexpected error message")
-               }
-       }
-}
diff --git a/lightning-persister/src/utils.rs b/lightning-persister/src/utils.rs
new file mode 100644 (file)
index 0000000..54ec230
--- /dev/null
@@ -0,0 +1,59 @@
+use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN};
+use lightning::util::string::PrintableString;
+
+
+pub(crate) fn is_valid_kvstore_str(key: &str) -> bool {
+       key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
+}
+
+pub(crate) fn check_namespace_key_validity(namespace: &str, sub_namespace: &str, key: Option<&str>, operation: &str) -> Result<(), std::io::Error> {
+       if let Some(key) = key {
+               if key.is_empty() {
+                       debug_assert!(false, "Failed to {} {}/{}/{}: key may not be empty.", operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       let msg = format!("Failed to {} {}/{}/{}: key may not be empty.", operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+
+               if namespace.is_empty() && !sub_namespace.is_empty() {
+                       debug_assert!(false,
+                               "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       let msg = format!(
+                               "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.", operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+
+               if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) || !is_valid_kvstore_str(key) {
+                       debug_assert!(false, "Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+                               operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       let msg = format!("Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+                               operation,
+                               PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+       } else {
+               if namespace.is_empty() && !sub_namespace.is_empty() {
+                       debug_assert!(false,
+                               "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       let msg = format!(
+                               "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+               if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) {
+                       debug_assert!(false, "Failed to {} {}/{}: namespace and sub-namespace must be valid.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       let msg = format!("Failed to {} {}/{}: namespace and sub-namespace must be valid.",
+                               operation, PrintableString(namespace), PrintableString(sub_namespace));
+                       return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+               }
+       }
+
+       Ok(())
+}
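
For illustration only, a filesystem-backed `KVStore::read` might call the helper above before touching disk. Everything below other than `check_namespace_key_validity` itself (the method shape, the `data_dir` field, the path layout) is an assumption, not code from this patch.

fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> std::io::Result<Vec<u8>> {
	// Reject empty or out-of-alphabet namespaces/keys before touching the filesystem.
	check_namespace_key_validity(namespace, sub_namespace, Some(key), "read")?;

	// Map namespace/sub-namespace/key onto a directory hierarchy (layout assumed).
	let mut path = self.data_dir.clone();
	if !namespace.is_empty() {
		path.push(namespace);
		if !sub_namespace.is_empty() {
			path.push(sub_namespace);
		}
	}
	path.push(key);
	std::fs::read(path)
}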
index 6051f00b90a8327026ff89b2a6a758fd5c6fadc4..fef1e3bf14fc7b8c051e084f3c492084293562b7 100644 (file)
@@ -772,7 +772,7 @@ where C::Target: chain::Filter,
                                        monitor_state.last_chain_persist_height.load(Ordering::Acquire) + LATENCY_GRACE_PERIOD_BLOCKS as usize
                                                > self.highest_chain_height.load(Ordering::Acquire)
                        {
-                               log_info!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
+                               log_debug!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
                        } else {
                                if monitor_state.channel_perm_failed.load(Ordering::Acquire) {
                                       // If an `UpdateOrigin::ChainSync` persistence failed with `PermanentFailure`,
index 95f2eb357dcd57a395227b828704ad9587c7dac5..bb98e271597309d057ca4712b394e302b35cddc3 100644 (file)
@@ -508,6 +508,14 @@ pub enum Event {
                /// serialized prior to LDK version 0.0.117.
                sender_intended_total_msat: Option<u64>,
        },
+       /// Indicates a request for an invoice failed to yield a response in a reasonable amount of time
+       /// or was explicitly abandoned by [`ChannelManager::abandon_payment`].
+       ///
+       /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+       InvoiceRequestFailed {
+               /// The `payment_id` that would have been associated with the payment for the requested invoice.
+               payment_id: PaymentId,
+       },
        /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
        /// and we got back the payment preimage for it).
        ///
@@ -1148,6 +1156,12 @@ impl Writeable for Event {
                                        (8, funding_txo, required),
                                });
                        },
+                       &Event::InvoiceRequestFailed { ref payment_id } => {
+                               33u8.write(writer)?;
+                               write_tlv_fields!(writer, {
+                                       (0, payment_id, required),
+                               })
+                       },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
@@ -1535,6 +1549,17 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
+                       33u8 => {
+                               let f = || {
+                                       _init_and_read_len_prefixed_tlv_fields!(reader, {
+                                               (0, payment_id, required),
+                                       });
+                                       Ok(Some(Event::InvoiceRequestFailed {
+                                               payment_id: payment_id.0.unwrap(),
+                                       }))
+                               };
+                               f()
+                       },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
                        // reads.
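
To sketch how an application might consume the new event once it is deserialized, here is a hedged example; the handler function and logging are assumptions, and only the event variant and its `payment_id` field come from the change above.

use lightning::events::Event;

fn handle_event(event: Event) {
	match event {
		Event::InvoiceRequestFailed { payment_id } => {
			// The BOLT 12 invoice request timed out or was abandoned via
			// ChannelManager::abandon_payment; drop any state keyed by this id.
			eprintln!("invoice request failed for payment {:?}", payment_id);
		},
		_ => { /* other events handled elsewhere */ },
	}
}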
@@ -1667,7 +1692,7 @@ pub enum MessageSendEvent {
                /// The node_id of the node which should receive this message
                node_id: PublicKey,
                /// The message which should be sent.
-               msg: msgs::TxAddInput,
+               msg: msgs::TxAbort,
        },
        /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id.
        SendChannelReady {
index 8e52af093d6c59e42a05d5a8e11d0307fcd8c367..1302cdbe379c786c928bdab527a7654c4ccebe03 100644 (file)
@@ -20,10 +20,10 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
-use crate::ln::channel::AnnouncementSigsState;
+use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;
 use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::test_utils::TestBroadcaster;
@@ -111,7 +111,7 @@ fn test_monitor_and_persister_update_fail() {
        let chain_mon = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -136,15 +136,18 @@ fn test_monitor_and_persister_update_fail() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Check that even though the persister is returning a InProgress,
-                       // because the update is bogus, ultimately the error that's returned
-                       // should be a PermanentFailure.
-                       if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-                       logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               // Check that even though the persister is returning an InProgress,
+                               // because the update is bogus, the error that's ultimately returned
+                               // should be a PermanentFailure.
+                               if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
+                               logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
 
        check_added_monitors!(nodes[0], 1);
@@ -1460,12 +1463,12 @@ fn monitor_failed_no_reestablish_response() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        }
        {
                let mut node_1_per_peer_lock;
                let mut node_1_peer_state_lock;
-               get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
        }
 
        // Route the payment and deliver the initial commitment_signed (with a monitor update failure
index da1364021cefe99e9f1b63e2ecd215d8f67b438e..796c041f8185d78d68972370726fdccb96fb4fd5 100644 (file)
@@ -594,6 +594,9 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 /// exceeding this age limit will be force-closed and purged from memory.
 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
 
+/// Number of blocks needed for an output from a coinbase transaction to be spendable.
+pub(crate) const COINBASE_MATURITY: u32 = 100;
+
 struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
 }
@@ -602,6 +605,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
 });
 
+/// The `ChannelPhase` enum describes the current phase of a Lightning channel's life, with each
+/// variant containing the appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+       UnfundedOutboundV1(OutboundV1Channel<SP>),
+       UnfundedInboundV1(InboundV1Channel<SP>),
+       Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+       SP::Target: SignerProvider,
+       <SP::Target as SignerProvider>::Signer: ChannelSigner,
+{
+       pub fn context(&'a self) -> &'a ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(chan) => &chan.context,
+                       ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+                       ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+               }
+       }
+
+       pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+               match self {
+                       ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+                       ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+               }
+       }
+}
+
 /// Contains all state common to unfunded inbound/outbound channels.
 pub(super) struct UnfundedChannelContext {
        /// A counter tracking how many ticks have elapsed since this unfunded channel was
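
A short sketch of the intended calling pattern (the `peer_state` and `channel_id` bindings are assumptions): code that only needs state shared across phases goes through `context()`, while funded-only logic matches on the `Funded` variant.

match peer_state.channel_by_id.get_mut(&channel_id) {
	Some(ChannelPhase::Funded(chan)) => {
		// Funded-only operations (commitment updates, HTLC handling) live here.
		let _ = &mut chan.context;
	},
	Some(phase) => {
		// Unfunded variants still expose the shared ChannelContext.
		let _ = phase.context();
	},
	None => { /* unknown channel_id */ },
}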
@@ -2033,11 +2065,6 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur
        (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 }
 
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_funding_signed on an
-// inbound channel.
-//
 // Holder designates channel data owned for the benefit of the user client.
 // Counterparty designates channel data owned by the other channel participant.
 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
@@ -4734,12 +4761,14 @@ impl<SP: Deref> Channel<SP> where
                                                        return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
                                                } else {
                                                        if self.context.is_outbound() {
-                                                               for input in tx.input.iter() {
-                                                                       if input.witness.is_empty() {
-                                                                               // We generated a malleable funding transaction, implying we've
-                                                                               // just exposed ourselves to funds loss to our counterparty.
-                                                                               #[cfg(not(fuzzing))]
-                                                                               panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+                                                               if !tx.is_coin_base() {
+                                                                       for input in tx.input.iter() {
+                                                                               if input.witness.is_empty() {
+                                                                                       // We generated a malleable funding transaction, implying we've
+                                                                                       // just exposed ourselves to funds loss to our counterparty.
+                                                                                       #[cfg(not(fuzzing))]
+                                                                                       panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+                                                                               }
                                                                        }
                                                                }
                                                        }
@@ -4750,6 +4779,13 @@ impl<SP: Deref> Channel<SP> where
                                                                Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
                                                        }
                                                }
+                                               // If this is a coinbase transaction and not a 0-conf channel,
+                                               // we should update our min_depth to 100 to handle coinbase maturity.
+                                               if tx.is_coin_base() &&
+                                                       self.context.minimum_depth.unwrap_or(0) > 0 &&
+                                                       self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+                                                       self.context.minimum_depth = Some(COINBASE_MATURITY);
+                                               }
                                        }
                                        // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
@@ -5821,6 +5857,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                self.context.channel_state = ChannelState::FundingCreated as u32;
                self.context.channel_id = funding_txo.to_channel_id();
+
+               // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
+               // We can skip this if it is a zero-conf channel.
+               if funding_transaction.is_coin_base() &&
+                       self.context.minimum_depth.unwrap_or(0) > 0 &&
+                       self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+                       self.context.minimum_depth = Some(COINBASE_MATURITY);
+               }
+
                self.context.funding_transaction = Some(funding_transaction);
 
                let channel = Channel {
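
The maturity rule applied in both places above can be restated as a small pure function; this is an illustrative rewrite under the same constant, not code from the patch.

/// Returns the confirmation depth to require given the configured depth and
/// whether the funding transaction is a coinbase transaction.
fn effective_minimum_depth(configured: Option<u32>, is_coinbase: bool) -> Option<u32> {
	const COINBASE_MATURITY: u32 = 100;
	match configured {
		// 0-conf channels (depth 0) are left alone; otherwise a coinbase funding
		// output must wait out the 100-block maturity period.
		Some(depth) if is_coinbase && depth > 0 && depth < COINBASE_MATURITY => Some(COINBASE_MATURITY),
		other => other,
	}
}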
index 621ebf0c460333446e22d2a28bd7bdc4b724dae8..cdbcd302929c70b1a2a5c59dd51da2d22df39e2e 100644 (file)
@@ -21,7 +21,9 @@ use core::fmt;
 use core::ops::Deref;
 
 /// A unique 32-byte identifier for a channel.
-/// Depending on how the ID is generated, several varieties are distinguished (but all are stored as 32 bytes): _v1_ and _temporary_.
+/// Depending on how the ID is generated, several varieties are distinguished
+/// (but all are stored as 32 bytes):
+///   _v1_ and _temporary_.
 /// A _v1_ channel ID is generated based on funding tx outpoint (txid & index).
 /// A _temporary_ ID is generated randomly.
 /// (Later revocation-point-based _v2_ is a possibility.)
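
For reference, the _v1_ derivation XORs the funding output index into the last two bytes of the funding txid (per BOLT 2). The standalone helper below is an illustrative sketch, not this module's API, and glosses over txid byte-order conventions.

fn v1_channel_id(funding_txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut id = funding_txid_bytes;
	// The big-endian output index alters only the last two bytes of the txid.
	id[30] ^= (funding_output_index >> 8) as u8;
	id[31] ^= (funding_output_index & 0xff) as u8;
	id
}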
index 64d5521dcb95f78ba3317398e19438f074b19dbc..6ea51660008fbad931c76d7cef7254039fed16a5 100644 (file)
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -237,7 +237,12 @@ impl From<&ClaimableHTLC> for events::ClaimedHTLC {
 ///
 /// This is not exported to bindings users as we just use [u8; 32] directly
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentId(pub [u8; 32]);
+pub struct PaymentId(pub [u8; Self::LENGTH]);
+
+impl PaymentId {
+       /// Number of bytes in the id.
+       pub const LENGTH: usize = 32;
+}
 
 impl Writeable for PaymentId {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
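
With `LENGTH` exposed, callers can size identifier buffers without a magic `32`. A minimal hedged sketch follows; the `entropy_source` handle is an assumption, and `get_secure_random_bytes` is assumed to return a 32-byte array as LDK's `EntropySource` does.

// [u8; 32] from the entropy source matches [u8; PaymentId::LENGTH] exactly.
let bytes: [u8; PaymentId::LENGTH] = entropy_source.get_secure_random_bytes();
let payment_id = PaymentId(bytes);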
@@ -666,22 +671,10 @@ impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
 
 /// State we hold per-peer.
 pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
-       /// `channel_id` -> `Channel`.
-       ///
-       /// Holds all funded channels where the peer is the counterparty.
-       pub(super) channel_by_id: HashMap<ChannelId, Channel<SP>>,
-       /// `temporary_channel_id` -> `OutboundV1Channel`.
-       ///
-       /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
-       /// been assigned a `channel_id`, the entry in this map is removed and one is created in
-       /// `channel_by_id`.
-       pub(super) outbound_v1_channel_by_id: HashMap<ChannelId, OutboundV1Channel<SP>>,
-       /// `temporary_channel_id` -> `InboundV1Channel`.
-       ///
-       /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
-       /// been assigned a `channel_id`, the entry in this map is removed and one is created in
-       /// `channel_by_id`.
-       pub(super) inbound_v1_channel_by_id: HashMap<ChannelId, InboundV1Channel<SP>>,
+       /// `channel_id` -> `ChannelPhase`.
+       ///
+       /// Holds all channels where the peer is the counterparty, each wrapped in its corresponding `ChannelPhase`.
+       pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
        /// `temporary_channel_id` -> `InboundChannelRequest`.
        ///
        /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
@@ -735,24 +728,20 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
-               self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+               self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+                       && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
 
        // Returns a count of all channels we have with this peer, including unfunded channels.
        fn total_channel_count(&self) -> usize {
-               self.channel_by_id.len() +
-                       self.outbound_v1_channel_by_id.len() +
-                       self.inbound_v1_channel_by_id.len() +
-                       self.inbound_channel_request_by_id.len()
+               self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
        }
 
        // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
        fn has_channel(&self, channel_id: &ChannelId) -> bool {
-               self.channel_by_id.contains_key(&channel_id) ||
-                       self.outbound_v1_channel_by_id.contains_key(&channel_id) ||
-                       self.inbound_v1_channel_by_id.contains_key(&channel_id) ||
-                       self.inbound_channel_request_by_id.contains_key(&channel_id)
+               self.channel_by_id.contains_key(channel_id) ||
+                       self.inbound_channel_request_by_id.contains_key(channel_id)
        }
 }
 
@@ -1336,11 +1325,6 @@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_G
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
 
-/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the
-/// idempotency of payments by [`PaymentId`]. See
-/// [`OutboundPayments::remove_stale_resolved_payments`].
-pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
-
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
 /// until we mark the channel disabled and gossip the update.
 pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
@@ -1683,6 +1667,11 @@ pub enum ChannelShutdownState {
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
 #[derive(Debug, PartialEq)]
 pub enum RecentPaymentDetails {
+       /// When an invoice was requested and thus a payment has not yet been sent.
+       AwaitingInvoice {
+               /// Identifier for the payment to ensure idempotency.
+               payment_id: PaymentId,
+       },
        /// When a payment is still being sent and awaiting successful delivery.
        Pending {
                /// Hash of the payment that is currently being sent but has yet to be fulfilled or
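
To show the new variant from a caller's perspective, here is a hedged sketch of scanning recent payments; the `channel_manager` handle is an assumption, while `list_recent_payments` is the existing accessor that returns this enum.

for details in channel_manager.list_recent_payments() {
	if let RecentPaymentDetails::AwaitingInvoice { payment_id } = details {
		// An invoice has been requested for this payment_id, but no payment
		// has been sent yet; the id is retained for idempotency.
		println!("awaiting invoice for payment {:?}", payment_id);
	}
}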
@@ -1806,45 +1795,55 @@ macro_rules! update_maps_on_chan_removal {
 }
 
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
-macro_rules! convert_chan_err {
-       ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
+macro_rules! convert_chan_phase_err {
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
                match $err {
                        ChannelError::Warn(msg) => {
-                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
                        },
                        ChannelError::Ignore(msg) => {
-                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
                        },
                        ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", &$channel_id, msg);
-                               update_maps_on_chan_removal!($self, &$channel.context);
+                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
+                               update_maps_on_chan_removal!($self, $channel.context);
                                let shutdown_res = $channel.context.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
-                                       shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis()))
+                               let user_id = $channel.context.get_user_id();
+                               let channel_capacity_satoshis = $channel.context.get_value_satoshis();
+
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
+                                       shutdown_res, $channel_update, channel_capacity_satoshis))
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
-               match $err {
-                       // We should only ever have `ChannelError::Close` when unfunded channels error.
-                       // In any case, just close the channel.
-                       ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", &$channel_id, msg);
-                               update_maps_on_chan_removal!($self, &$channel_context);
-                               let shutdown_res = $channel_context.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
-                                       shutdown_res, None, $channel_context.get_value_satoshis()))
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
+               convert_chan_phase_err!($self, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
+       };
+       ($self: ident, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
+               convert_chan_phase_err!($self, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
+       };
+       ($self: ident, $err: expr, $channel_phase: expr, $channel_id: expr) => {
+               match $channel_phase {
+                       ChannelPhase::Funded(channel) => {
+                               convert_chan_phase_err!($self, $err, channel, $channel_id, FUNDED_CHANNEL)
+                       },
+                       ChannelPhase::UnfundedOutboundV1(channel) => {
+                               convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+                       },
+                       ChannelPhase::UnfundedInboundV1(channel) => {
+                               convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
                }
-       }
+       };
 }
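
The macro above folds the old funded/unfunded error paths into one by matching on the
new `ChannelPhase` value stored in `channel_by_id`. A rough, self-contained sketch of that
shape, with toy types for illustration only (the real enum lives in
lightning/src/ln/channel.rs and is generic over the signer provider):

    struct ChannelContext { counterparty_node_id: [u8; 33], value_satoshis: u64 }
    struct FundedChannel { context: ChannelContext }
    struct UnfundedChannel { context: ChannelContext }

    enum ChannelPhase {
        Funded(FundedChannel),
        UnfundedOutboundV1(UnfundedChannel),
        UnfundedInboundV1(UnfundedChannel),
    }

    impl ChannelPhase {
        // Shared state is reachable in every phase, so most call sites only match on the
        // phase itself when they need funded-only data (e.g. a channel_update to broadcast).
        fn context(&self) -> &ChannelContext {
            match self {
                ChannelPhase::Funded(chan) => &chan.context,
                ChannelPhase::UnfundedOutboundV1(chan)
                | ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
            }
        }
    }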
 
-macro_rules! break_chan_entry {
+macro_rules! break_chan_phase_entry {
        ($self: ident, $res: expr, $entry: expr) => {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
+                               let key = *$entry.key();
+                               let (drop, res) = convert_chan_phase_err!($self, e, $entry.get_mut(), &key);
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1854,27 +1853,13 @@ macro_rules! break_chan_entry {
        }
 }
 
-macro_rules! try_v1_outbound_chan_entry {
-       ($self: ident, $res: expr, $entry: expr) => {
-               match $res {
-                       Ok(res) => res,
-                       Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
-                               if drop {
-                                       $entry.remove_entry();
-                               }
-                               return Err(res);
-                       }
-               }
-       }
-}
-
-macro_rules! try_chan_entry {
+macro_rules! try_chan_phase_entry {
        ($self: ident, $res: expr, $entry: expr) => {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
+                               let key = *$entry.key();
+                               let (drop, res) = convert_chan_phase_err!($self, e, $entry.get_mut(), &key);
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1884,11 +1869,11 @@ macro_rules! try_chan_entry {
        }
 }
 
-macro_rules! remove_channel {
+macro_rules! remove_channel_phase {
        ($self: expr, $entry: expr) => {
                {
                        let channel = $entry.remove_entry().1;
-                       update_maps_on_chan_removal!($self, &channel.context);
+                       update_maps_on_chan_removal!($self, &channel.context());
                        channel
                }
        }
@@ -2028,7 +2013,20 @@ macro_rules! handle_new_monitor_update {
                        handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
        };
        ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
-               handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
+               if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
+                       handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
+                               $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
+               } else {
+                       // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+                       // update).
+                       debug_assert!(false);
+                       let channel_id = *$chan_entry.key();
+                       let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
+                               "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
+                               $chan_entry.get_mut(), &channel_id);
+                       $chan_entry.remove();
+                       Err(err)
+               }
        };
        ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
                let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
@@ -2052,7 +2050,20 @@ macro_rules! handle_new_monitor_update {
                        })
        } };
        ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
-               handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+               if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
+                       handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
+                               $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
+               } else {
+                       // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+                       // update).
+                       debug_assert!(false);
+                       let channel_id = *$chan_entry.key();
+                       let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
+                               "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
+                               $chan_entry.get_mut(), &channel_id);
+                       $chan_entry.remove();
+                       Err(err)
+               }
        }
 }
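
Both `convert_chan_phase_err!` and `handle_new_monitor_update!` select their expansion with
bare ident tokens (FUNDED_CHANNEL, UNFUNDED_CHANNEL, INITIAL_MONITOR, MANUALLY_REMOVING).
A minimal, self-contained sketch of that macro_rules pattern, with purely illustrative
names rather than LDK's:

    macro_rules! describe_channel {
        ($id: expr, FUNDED_CHANNEL) => {
            format!("{}: funded, may broadcast a channel_update", $id)
        };
        ($id: expr, UNFUNDED_CHANNEL) => {
            format!("{}: unfunded, no channel_update available", $id)
        };
    }

    fn main() {
        // Each selector picks a different expansion at compile time; there is no runtime branch.
        println!("{}", describe_channel!("chan-1", FUNDED_CHANNEL));
        println!("{}", describe_channel!("chan-2", UNFUNDED_CHANNEL));
    }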
 
@@ -2289,7 +2300,7 @@ where
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
                let temporary_channel_id = channel.context.channel_id();
-               match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
+               match peer_state.channel_by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(fuzzing) {
                                        return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
@@ -2297,7 +2308,7 @@ where
                                        panic!("RNG is bad???");
                                }
                        },
-                       hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+                       hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
                }
 
                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
@@ -2321,12 +2332,18 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               // Only `Channels` in the channel_by_id map can be considered funded.
-                               for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone(), &self.fee_estimator);
-                                       res.push(details);
-                               }
+                               res.extend(peer_state.channel_by_id.iter()
+                                       .filter_map(|(chan_id, phase)| match phase {
+                                               // Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
+                                               ChannelPhase::Funded(chan) => Some((chan_id, chan)),
+                                               _ => None,
+                                       })
+                                       .filter(f)
+                                       .map(|(_channel_id, channel)| {
+                                               ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                                                       peer_state.latest_features.clone(), &self.fee_estimator)
+                                       })
+                               );
                        }
                }
                res
@@ -2348,18 +2365,8 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               for (_channel_id, channel) in peer_state.channel_by_id.iter() {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone(), &self.fee_estimator);
-                                       res.push(details);
-                               }
-                               for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
-                                               peer_state.latest_features.clone(), &self.fee_estimator);
-                                       res.push(details);
-                               }
-                               for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
-                                       let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+                               for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
+                                       let details = ChannelDetails::from_channel_context(context, best_block_height,
                                                peer_state.latest_features.clone(), &self.fee_estimator);
                                        res.push(details);
                                }
@@ -2390,15 +2397,13 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        let features = &peer_state.latest_features;
-                       let chan_context_to_details = |context| {
+                       let context_to_details = |context| {
                                ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
                        };
                        return peer_state.channel_by_id
                                .iter()
-                               .map(|(_, channel)| &channel.context)
-                               .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
-                               .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
-                               .map(chan_context_to_details)
+                               .map(|(_, phase)| phase.context())
+                               .map(context_to_details)
                                .collect();
                }
                vec![]
@@ -2414,7 +2419,14 @@ where
        /// [`Event::PaymentSent`]: events::Event::PaymentSent
        pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
                self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
-                       .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment {
+                       .filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
+                               PendingOutboundPayment::AwaitingInvoice { .. } => {
+                                       Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
+                               },
+                               // InvoiceReceived is an intermediate state and doesn't need to be exposed
+                               PendingOutboundPayment::InvoiceReceived { .. } => {
+                                       Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
+                               },
                                PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
                                        Some(RecentPaymentDetails::Pending {
                                                payment_hash: *payment_hash,
@@ -2467,37 +2479,40 @@ where
                                let peer_state = &mut *peer_state_lock;
 
                                match peer_state.channel_by_id.entry(channel_id.clone()) {
-                                       hash_map::Entry::Occupied(mut chan_entry) => {
-                                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                                               let their_features = &peer_state.latest_features;
-                                               let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
-                                                       .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
-                                               failed_htlcs = htlcs;
-
-                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                               // here as we don't need the monitor update to complete until we send a
-                                               // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: *counterparty_node_id,
-                                                       msg: shutdown_msg,
-                                               });
+                                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                                       let funding_txo_opt = chan.context.get_funding_txo();
+                                                       let their_features = &peer_state.latest_features;
+                                                       let (shutdown_msg, mut monitor_update_opt, htlcs) =
+                                                               chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+                                                       failed_htlcs = htlcs;
+
+                                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                                       // here as we don't need the monitor update to complete until we send a
+                                                       // `closing_signed`, which we'll delay if we're pending a monitor update.
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: *counterparty_node_id,
+                                                               msg: shutdown_msg,
+                                                       });
 
-                                               // Update the monitor with the shutdown script if necessary.
-                                               if let Some(monitor_update) = monitor_update_opt.take() {
-                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
-                                               }
+                                                       // Update the monitor with the shutdown script if necessary.
+                                                       if let Some(monitor_update) = monitor_update_opt.take() {
+                                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                                       peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+                                                       }
 
-                                               if chan_entry.get().is_shutdown() {
-                                                       let channel = remove_channel!(self, chan_entry);
-                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                       msg: channel_update
-                                                               });
+                                                       if chan.is_shutdown() {
+                                                               if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
+                                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       msg: channel_update
+                                                                               });
+                                                                       }
+                                                                       self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+                                                               }
                                                        }
-                                                       self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+                                                       break Ok(());
                                                }
-                                               break Ok(());
                                        },
                                        hash_map::Entry::Vacant(_) => (),
                                }
@@ -2507,8 +2522,6 @@ where
                        //
                        // An appropriate error will be returned for non-existence of the channel if that's the case.
                        return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
-                       // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
-                       // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
                };
 
                for htlc_source in failed_htlcs.drain(..) {
@@ -2616,26 +2629,21 @@ where
                        } else {
                                ClosureReason::HolderForceClosed
                        };
-                       if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
-                               self.issue_channel_close_events(&chan.get().context, closure_reason);
-                               let mut chan = remove_channel!(self, chan);
-                               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
-                               (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
-                       } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
-                               self.issue_channel_close_events(&chan.get().context, closure_reason);
-                               let mut chan = remove_channel!(self, chan);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Unfunded channel has no update
-                               (None, chan.context.get_counterparty_node_id())
-                       } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
-                               self.issue_channel_close_events(&chan.get().context, closure_reason);
-                               let mut chan = remove_channel!(self, chan);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               // Unfunded channel has no update
-                               (None, chan.context.get_counterparty_node_id())
+                       if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
+                               log_error!(self.logger, "Force-closing channel {}", channel_id);
+                               self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
+                               let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+                               match chan_phase {
+                                       ChannelPhase::Funded(mut chan) => {
+                                               self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+                                               (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
+                                       },
+                                       ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
+                                               self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false));
+                                               // Unfunded channel has no update
+                                               (None, chan_phase.context().get_counterparty_node_id())
+                                       },
+                               }
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
                                log_error!(self.logger, "Force-closing channel {}", &channel_id);
                                // N.B. that we don't send any channel close event here: we
@@ -2961,7 +2969,9 @@ where
                                }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
+                                       |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+                               ).flatten() {
                                        None => {
                                                // Channel was removed. The short_to_chan_info and channel_by_id maps
                                                // have no consistency guarantees.
@@ -3234,36 +3244,41 @@ where
                                .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
-                               if !chan.get().context.is_live() {
-                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
-                               }
-                               let funding_txo = chan.get().context.get_funding_txo().unwrap();
-                               let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
-                                       htlc_cltv, HTLCSource::OutboundRoute {
-                                               path: path.clone(),
-                                               session_priv: session_priv.clone(),
-                                               first_hop_htlc_msat: htlc_msat,
-                                               payment_id,
-                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
-                               match break_chan_entry!(self, send_res, chan) {
-                                       Some(monitor_update) => {
-                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
-                                                       Err(e) => break Err(e),
-                                                       Ok(false) => {
-                                                               // Note that MonitorUpdateInProgress here indicates (per function
-                                                               // docs) that we will resend the commitment update once monitor
-                                                               // updating completes. Therefore, we must return an error
-                                                               // indicating that it is unsafe to retry the payment wholesale,
-                                                               // which we do in the send_payment check for
-                                                               // MonitorUpdateInProgress, below.
-                                                               return Err(APIError::MonitorUpdateInProgress);
+                       if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
+                               match chan_phase_entry.get_mut() {
+                                       ChannelPhase::Funded(chan) => {
+                                               if !chan.context.is_live() {
+                                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
+                                               }
+                                               let funding_txo = chan.context.get_funding_txo().unwrap();
+                                               let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+                                                       htlc_cltv, HTLCSource::OutboundRoute {
+                                                               path: path.clone(),
+                                                               session_priv: session_priv.clone(),
+                                                               first_hop_htlc_msat: htlc_msat,
+                                                               payment_id,
+                                                       }, onion_packet, None, &self.fee_estimator, &self.logger);
+                                               match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
+                                                       Some(monitor_update) => {
+                                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
+                                                                       Err(e) => break Err(e),
+                                                                       Ok(false) => {
+                                                                               // Note that MonitorUpdateInProgress here indicates (per function
+                                                                               // docs) that we will resend the commitment update once monitor
+                                                                               // updating completes. Therefore, we must return an error
+                                                                               // indicating that it is unsafe to retry the payment wholesale,
+                                                                               // which we do in the send_payment check for
+                                                                               // MonitorUpdateInProgress, below.
+                                                                               return Err(APIError::MonitorUpdateInProgress);
+                                                                       },
+                                                                       Ok(true) => {},
+                                                               }
                                                        },
-                                                       Ok(true) => {},
+                                                       None => {},
                                                }
                                        },
-                                       None => { },
-                               }
+                                       _ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
+                               };
                        } else {
                                // The channel was likely removed after we fetched the id from the
                                // `short_to_chan_info` map, but before we successfully locked the
@@ -3376,10 +3391,12 @@ where
        }
 
 
-       /// Signals that no further retries for the given payment should occur. Useful if you have a
+       /// Signals that no further attempts for the given payment should occur. Useful if you have a
        /// pending outbound payment with retries remaining, but wish to stop retrying the payment before
        /// retries are exhausted.
        ///
+       /// # Event Generation
+       ///
        /// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
        /// as there are no remaining pending HTLCs for this payment.
        ///
@@ -3387,11 +3404,19 @@ where
        /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
        /// determine the ultimate status of a payment.
        ///
-       /// If an [`Event::PaymentFailed`] event is generated and we restart without this
-       /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
+       /// # Requested Invoices
        ///
-       /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
-       /// [`Event::PaymentSent`]: events::Event::PaymentSent
+       /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the
+       /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying
+       /// it once received. The other events may only be generated once the invoice has been received.
+       ///
+       /// # Restart Behavior
+       ///
+       /// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
+       /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated; likewise for
+       /// [`Event::InvoiceRequestFailed`].
+       ///
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        pub fn abandon_payment(&self, payment_id: PaymentId) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
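
A hedged usage sketch of the API documented above; `channel_manager`, `payment_id`, and the
event loop are assumed from the surrounding application rather than shown here:

    // Stop retrying an in-flight payment; resolution still arrives via events.
    channel_manager.abandon_payment(payment_id);

    // Later, in the application's event handler:
    // - Event::PaymentFailed { payment_id, .. }: no HTLCs remain and the payment will not be retried.
    // - Event::PaymentSent { .. }: an HTLC had already succeeded before the payment was abandoned.
    // - Event::InvoiceRequestFailed { payment_id }: a BOLT 12 invoice was never received, so no
    //   payment attempt will be made even if one arrives later.
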
@@ -3463,8 +3488,8 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(&temporary_channel_id) {
-                       Some(chan) => {
+               let (chan, msg) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+                       Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
                                let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
@@ -3488,13 +3513,18 @@ where
                                        },
                                }
                        },
-                       None => {
-                               return Err(APIError::ChannelUnavailable {
+                       Some(phase) => {
+                               peer_state.channel_by_id.insert(*temporary_channel_id, phase);
+                               return Err(APIError::APIMisuseError {
                                        err: format!(
-                                               "Channel with id {} not found for the passed counterparty node_id {}",
+                                               "Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
                                                temporary_channel_id, counterparty_node_id),
                                })
                        },
+                       None => return Err(APIError::ChannelUnavailable {err: format!(
+                               "Channel with id {} not found for the passed counterparty node_id {}",
+                               temporary_channel_id, counterparty_node_id),
+                               }),
                };
 
                peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
@@ -3510,7 +3540,7 @@ where
                                if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
-                               e.insert(chan);
+                               e.insert(ChannelPhase::Funded(chan));
                        }
                }
                Ok(())
@@ -3556,11 +3586,13 @@ where
        pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
-               for inp in funding_transaction.input.iter() {
-                       if inp.witness.is_empty() {
-                               return Err(APIError::APIMisuseError {
-                                       err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
-                               });
+               if !funding_transaction.is_coin_base() {
+                       for inp in funding_transaction.input.iter() {
+                               if inp.witness.is_empty() {
+                                       return Err(APIError::APIMisuseError {
+                                               err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
+                                       });
+                               }
                        }
                }
                {
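
A self-contained restatement of the relaxed check introduced above, using the rust-bitcoin
`Transaction` API (the helper name is illustrative, not part of LDK):

    use bitcoin::Transaction;

    // Non-coinbase funding transactions must be fully signed, i.e. every input carries a
    // witness; a coinbase transaction is exempt from the per-input witness requirement.
    fn funding_tx_fully_signed(tx: &Transaction) -> bool {
        tx.is_coin_base() || tx.input.iter().all(|inp| !inp.witness.is_empty())
    }
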
@@ -3648,27 +3680,23 @@ where
                        };
                }
                for channel_id in channel_ids {
-                       if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
-                               let mut config = channel.context.config();
+                       if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
+                               let mut config = channel_phase.context().config();
                                config.apply(config_update);
-                               if !channel.context.update_config(&config) {
+                               if !channel_phase.context_mut().update_config(&config) {
                                        continue;
                                }
-                               if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
-                               } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                               node_id: channel.context.get_counterparty_node_id(),
-                                               msg,
-                                       });
+                               if let ChannelPhase::Funded(channel) = channel_phase {
+                                       if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                                       } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                       node_id: channel.context.get_counterparty_node_id(),
+                                                       msg,
+                                               });
+                                       }
                                }
                                continue;
-                       }
-
-                       let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
-                               &mut channel.context
-                       } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
-                               &mut channel.context
                        } else {
                                // This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
                                debug_assert!(false);
@@ -3678,11 +3706,6 @@ where
                                                channel_id, counterparty_node_id),
                                });
                        };
-                       let mut config = context.config();
-                       config.apply(config_update);
-                       // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`
-                       // which would be the case for pending inbound/outbound channels.
-                       context.update_config(&config);
                }
                Ok(())
        }
@@ -3749,8 +3772,8 @@ where
                                .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.get(&next_hop_channel_id) {
-                               Some(chan) => {
+                       match peer_state.channel_by_id.get(next_hop_channel_id) {
+                               Some(ChannelPhase::Funded(chan)) => {
                                        if !chan.context.is_usable() {
                                                return Err(APIError::ChannelUnavailable {
                                                        err: format!("Channel with id {} not fully established", next_hop_channel_id)
@@ -3758,8 +3781,12 @@ where
                                        }
                                        chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
                                },
+                               Some(_) => return Err(APIError::ChannelUnavailable {
+                                       err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
+                                               next_hop_channel_id, next_node_id)
+                               }),
                                None => return Err(APIError::ChannelUnavailable {
-                                       err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+                                       err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
                                                next_hop_channel_id, next_node_id)
                                })
                        }
@@ -3955,71 +3982,68 @@ where
                                        }
                                        let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
-                                       match peer_state.channel_by_id.entry(forward_chan_id) {
-                                               hash_map::Entry::Vacant(_) => {
-                                                       forwarding_channel_not_found!();
-                                                       continue;
-                                               },
-                                               hash_map::Entry::Occupied(mut chan) => {
-                                                       for forward_info in pending_forwards.drain(..) {
-                                                               match forward_info {
-                                                                       HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                               prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                                               forward_info: PendingHTLCInfo {
-                                                                                       incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
-                                                                                       routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
-                                                                               },
-                                                                       }) => {
-                                                                               log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
-                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                                       short_channel_id: prev_short_channel_id,
-                                                                                       user_channel_id: Some(prev_user_channel_id),
-                                                                                       outpoint: prev_funding_outpoint,
-                                                                                       htlc_id: prev_htlc_id,
-                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
-                                                                                       // Phantom payments are only PendingHTLCRouting::Receive.
-                                                                                       phantom_shared_secret: None,
-                                                                               });
-                                                                               if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
-                                                                                       payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                                       onion_packet, skimmed_fee_msat, &self.fee_estimator,
-                                                                                       &self.logger)
-                                                                               {
-                                                                                       if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
-                                                                                       } else {
-                                                                                               panic!("Stated return value requirements in send_htlc() were not met");
-                                                                                       }
-                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
-                                                                                       failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::reason(failure_code, data),
-                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
-                                                                                       ));
-                                                                                       continue;
-                                                                               }
+                                       if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                               for forward_info in pending_forwards.drain(..) {
+                                                       match forward_info {
+                                                               HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+                                                                       prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+                                                                       forward_info: PendingHTLCInfo {
+                                                                               incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+                                                                               routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
                                                                        },
-                                                                       HTLCForwardInfo::AddHTLC { .. } => {
-                                                                               panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
-                                                                       },
-                                                                       HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                                                                               log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                               if let Err(e) = chan.get_mut().queue_fail_htlc(
-                                                                                       htlc_id, err_packet, &self.logger
-                                                                               ) {
-                                                                                       if let ChannelError::Ignore(msg) = e {
-                                                                                               log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                                       } else {
-                                                                                               panic!("Stated return value requirements in queue_fail_htlc() were not met");
-                                                                                       }
-                                                                                       // fail-backs are best-effort, we probably already have one
-                                                                                       // pending, and if not that's OK, if not, the channel is on
-                                                                                       // the chain and sending the HTLC-Timeout is their problem.
-                                                                                       continue;
+                                                               }) => {
+                                                                       log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
+                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                               short_channel_id: prev_short_channel_id,
+                                                                               user_channel_id: Some(prev_user_channel_id),
+                                                                               outpoint: prev_funding_outpoint,
+                                                                               htlc_id: prev_htlc_id,
+                                                                               incoming_packet_shared_secret: incoming_shared_secret,
+                                                                               // Phantom payments are only PendingHTLCRouting::Receive.
+                                                                               phantom_shared_secret: None,
+                                                                       });
+                                                                       if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
+                                                                               payment_hash, outgoing_cltv_value, htlc_source.clone(),
+                                                                               onion_packet, skimmed_fee_msat, &self.fee_estimator,
+                                                                               &self.logger)
+                                                                       {
+                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                       log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+                                                                               } else {
+                                                                                       panic!("Stated return value requirements in send_htlc() were not met");
                                                                                }
-                                                                       },
-                                                               }
+                                                                               let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
+                                                                               failed_forwards.push((htlc_source, payment_hash,
+                                                                                       HTLCFailReason::reason(failure_code, data),
+                                                                                       HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
+                                                                               ));
+                                                                               continue;
+                                                                       }
+                                                               },
+                                                               HTLCForwardInfo::AddHTLC { .. } => {
+                                                                       panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+                                                               },
+                                                               HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+                                                                       log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       if let Err(e) = chan.queue_fail_htlc(
+                                                                               htlc_id, err_packet, &self.logger
+                                                                       ) {
+                                                                               if let ChannelError::Ignore(msg) = e {
+                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                               } else {
+                                                                                       panic!("Stated return value requirements in queue_fail_htlc() were not met");
+                                                                               }
+                                                                               // fail-backs are best-effort, we probably already have one
+                                                                               // pending, and if not that's OK; if the channel is instead on
+                                                                               // the chain, sending the HTLC-Timeout is their problem.
+                                                                               continue;
+                                                                       }
+                                                               },
                                                        }
                                                }
+                                       } else {
+                                               forwarding_channel_not_found!();
+                                               continue;
                                        }
                                } else {
                                        'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
@@ -4322,10 +4346,10 @@ where
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
-                                                               hash_map::Entry::Occupied(mut chan) => {
+                                                               hash_map::Entry::Occupied(mut chan_phase) => {
                                                                        updated_chan = true;
                                                                        handle_new_monitor_update!(self, funding_txo, update.clone(),
-                                                                               peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
+                                                                               peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
                                                                },
                                                                hash_map::Entry::Vacant(_) => Ok(()),
                                                        }
@@ -4349,7 +4373,7 @@ where
                                        if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                let peer_state = &mut *peer_state_lock;
-                                               if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+                                               if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
                                                        handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
                                                } else {
                                                        let update_actions = peer_state.monitor_update_blocked_actions
@@ -4408,7 +4432,9 @@ where
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
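+                               // Fee updates only apply to funded channels; the other phases are filtered out here.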
+                               for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
+                                       |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
+                               ) {
                                        let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                                                min_mempool_feerate
                                        } else {
@@ -4450,6 +4476,36 @@ where
                        let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        let mut pending_peers_awaiting_removal = Vec::new();
+
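+                       // Helper used by the per-peer `retain` below: force-closes an unfunded channel that
+                       // has waited too long to complete the handshake, returning `false` to drop it from
+                       // the map and `true` to keep it.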
+                       let process_unfunded_channel_tick = |
+                               chan_id: &ChannelId,
+                               context: &mut ChannelContext<SP>,
+                               unfunded_context: &mut UnfundedChannelContext,
+                               pending_msg_events: &mut Vec<MessageSendEvent>,
+                               counterparty_node_id: PublicKey,
+                       | {
+                               context.maybe_expire_prev_config();
+                               if unfunded_context.should_expire_unfunded_channel() {
+                                       log_error!(self.logger,
+                                               "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
+                                       update_maps_on_chan_removal!(self, &context);
+                                       self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
+                                       self.finish_force_close_channel(context.force_shutdown(false));
+                                       pending_msg_events.push(MessageSendEvent::HandleError {
+                                               node_id: counterparty_node_id,
+                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                       msg: msgs::ErrorMessage {
+                                                               channel_id: *chan_id,
+                                                               data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
+                                                       },
+                                               },
+                                       });
+                                       false
+                               } else {
+                                       true
+                               }
+                       };
+
                        {
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -4457,110 +4513,89 @@ where
                                        let peer_state = &mut *peer_state_lock;
                                        let pending_msg_events = &mut peer_state.pending_msg_events;
                                        let counterparty_node_id = *counterparty_node_id;
-                                       peer_state.channel_by_id.retain(|chan_id, chan| {
-                                               let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-                                                       min_mempool_feerate
-                                               } else {
-                                                       normal_feerate
-                                               };
-                                               let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
-                                               if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
-
-                                               if let Err(e) = chan.timer_check_closing_negotiation_progress() {
-                                                       let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
-                                                       handle_errors.push((Err(err), counterparty_node_id));
-                                                       if needs_close { return false; }
-                                               }
-
-                                               match chan.channel_update_status() {
-                                                       ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
-                                                       ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
-                                                       ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
-                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
-                                                       ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
-                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
-                                                       ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
-                                                               n += 1;
-                                                               if n >= DISABLE_GOSSIP_TICKS {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
-                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: update
-                                                                               });
-                                                                       }
-                                                                       should_persist = NotifyOption::DoPersist;
+                                       peer_state.channel_by_id.retain(|chan_id, phase| {
+                                               match phase {
+                                                       ChannelPhase::Funded(chan) => {
+                                                               let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                                                                       min_mempool_feerate
                                                                } else {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
-                                                               }
-                                                       },
-                                                       ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
-                                                               n += 1;
-                                                               if n >= ENABLE_GOSSIP_TICKS {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
-                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: update
-                                                                               });
-                                                                       }
-                                                                       should_persist = NotifyOption::DoPersist;
-                                                               } else {
-                                                                       chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
+                                                                       normal_feerate
+                                                               };
+                                                               let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
+                                                               if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+
+                                                               if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+                                                                       let (needs_close, err) = convert_chan_phase_err!(self, e, chan, chan_id, FUNDED_CHANNEL);
+                                                                       handle_errors.push((Err(err), counterparty_node_id));
+                                                                       if needs_close { return false; }
                                                                }
-                                                       },
-                                                       _ => {},
-                                               }
 
-                                               chan.context.maybe_expire_prev_config();
-
-                                               if chan.should_disconnect_peer_awaiting_response() {
-                                                       log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
-                                                                       counterparty_node_id, chan_id);
-                                                       pending_msg_events.push(MessageSendEvent::HandleError {
-                                                               node_id: counterparty_node_id,
-                                                               action: msgs::ErrorAction::DisconnectPeerWithWarning {
-                                                                       msg: msgs::WarningMessage {
-                                                                               channel_id: *chan_id,
-                                                                               data: "Disconnecting due to timeout awaiting response".to_owned(),
+                                                               match chan.channel_update_status() {
+                                                                       ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+                                                                       ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+                                                                       ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
+                                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+                                                                       ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
+                                                                               => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+                                                                       ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
+                                                                               n += 1;
+                                                                               if n >= DISABLE_GOSSIP_TICKS {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+                                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                                       msg: update
+                                                                                               });
+                                                                                       }
+                                                                                       should_persist = NotifyOption::DoPersist;
+                                                                               } else {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
+                                                                               }
                                                                        },
-                                                               },
-                                                       });
-                                               }
+                                                                       ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
+                                                                               n += 1;
+                                                                               if n >= ENABLE_GOSSIP_TICKS {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+                                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                                       msg: update
+                                                                                               });
+                                                                                       }
+                                                                                       should_persist = NotifyOption::DoPersist;
+                                                                               } else {
+                                                                                       chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
+                                                                               }
+                                                                       },
+                                                                       _ => {},
+                                                               }
 
-                                               true
-                                       });
+                                                               chan.context.maybe_expire_prev_config();
+
+                                                               if chan.should_disconnect_peer_awaiting_response() {
+                                                                       log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+                                                                                       counterparty_node_id, chan_id);
+                                                                       pending_msg_events.push(MessageSendEvent::HandleError {
+                                                                               node_id: counterparty_node_id,
+                                                                               action: msgs::ErrorAction::DisconnectPeerWithWarning {
+                                                                                       msg: msgs::WarningMessage {
+                                                                                               channel_id: *chan_id,
+                                                                                               data: "Disconnecting due to timeout awaiting response".to_owned(),
+                                                                                       },
+                                                                               },
+                                                                       });
+                                                               }
 
-                                       let process_unfunded_channel_tick = |
-                                               chan_id: &ChannelId,
-                                               chan_context: &mut ChannelContext<SP>,
-                                               unfunded_chan_context: &mut UnfundedChannelContext,
-                                               pending_msg_events: &mut Vec<MessageSendEvent>,
-                                       | {
-                                               chan_context.maybe_expire_prev_config();
-                                               if unfunded_chan_context.should_expire_unfunded_channel() {
-                                                       log_error!(self.logger,
-                                                               "Force-closing pending channel with ID {} for not establishing in a timely manner",
-                                                               &chan_id);
-                                                       update_maps_on_chan_removal!(self, &chan_context);
-                                                       self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
-                                                       self.finish_force_close_channel(chan_context.force_shutdown(false));
-                                                       pending_msg_events.push(MessageSendEvent::HandleError {
-                                                               node_id: counterparty_node_id,
-                                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                                       msg: msgs::ErrorMessage {
-                                                                               channel_id: *chan_id,
-                                                                               data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
-                                                                       },
-                                                               },
-                                                       });
-                                                       false
-                                               } else {
-                                                       true
+                                                               true
+                                                       },
+                                                       ChannelPhase::UnfundedInboundV1(chan) => {
+                                                               process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+                                                                       pending_msg_events, counterparty_node_id)
+                                                       },
+                                                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                               process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+                                                                       pending_msg_events, counterparty_node_id)
+                                                       },
                                                }
-                                       };
-                                       peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
-                                               chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
-                                       peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
-                                               chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
+                                       });
 
                                        for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
                                                if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
@@ -4648,7 +4683,7 @@ where
                                let _ = handle_error!(self, err, counterparty_node_id);
                        }
 
-                       self.pending_outbound_payments.remove_stale_resolved_payments(&self.pending_events);
+                       self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
 
                        // Technically we don't need to do this here, but if we have holding cell entries in a
                        // channel that need freeing, it's better to do that here and block a background task
@@ -4776,8 +4811,14 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                match peer_state.channel_by_id.entry(channel_id) {
-                                       hash_map::Entry::Occupied(chan_entry) => {
-                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+                                       hash_map::Entry::Occupied(chan_phase_entry) => {
+                                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get() {
+                                                       self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan)
+                                               } else {
+                                                       // We shouldn't be trying to fail holding cell HTLCs on an unfunded channel.
+                                                       debug_assert!(false);
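+                                                       // 0x4000|10 is PERM|unknown_next_peer per BOLT 4.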
+                                                       (0x4000|10, Vec::new())
+                                               }
                                        },
                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                }
@@ -5036,36 +5077,38 @@ where
                        if peer_state_opt.is_some() {
                                let mut peer_state_lock = peer_state_opt.unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-                                       let counterparty_node_id = chan.get().context.get_counterparty_node_id();
-                                       let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
-                                       if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
-                                               if let Some(action) = completion_action(Some(htlc_value_msat)) {
-                                                       log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
-                                                               &chan_id, action);
-                                                       peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-                                               }
-                                               if !during_init {
-                                                       let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-                                                               peer_state, per_peer_state, chan);
-                                                       if let Err(e) = res {
-                                                               // TODO: This is a *critical* error - we probably updated the outbound edge
-                                                               // of the HTLC's monitor with a preimage. We should retry this monitor
-                                                               // update over and over again until morale improves.
-                                                               log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-                                                               return Err((counterparty_node_id, e));
+                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let counterparty_node_id = chan.context.get_counterparty_node_id();
+                                               let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+                                               if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+                                                       if let Some(action) = completion_action(Some(htlc_value_msat)) {
+                                                               log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                                       chan_id, action);
+                                                               peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+                                                       }
+                                                       if !during_init {
+                                                               let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+                                                                       peer_state, per_peer_state, chan_phase_entry);
+                                                               if let Err(e) = res {
+                                                                       // TODO: This is a *critical* error - we probably updated the outbound edge
+                                                                       // of the HTLC's monitor with a preimage. We should retry this monitor
+                                                                       // update over and over again until morale improves.
+                                                                       log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+                                                                       return Err((counterparty_node_id, e));
+                                                               }
+                                                       } else {
+                                                               // If we're running during init we cannot update a monitor directly -
+                                                               // it probably hasn't actually been loaded yet. Instead, push the
+                                                               // monitor update as a background event.
+                                                               self.pending_background_events.lock().unwrap().push(
+                                                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                                               counterparty_node_id,
+                                                                               funding_txo: prev_hop.outpoint,
+                                                                               update: monitor_update.clone(),
+                                                                       });
                                                        }
-                                               } else {
-                                                       // If we're running during init we cannot update a monitor directly -
-                                                       // they probably haven't actually been loaded yet. Instead, push the
-                                                       // monitor update as a background event.
-                                                       self.pending_background_events.lock().unwrap().push(
-                                                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                                                       counterparty_node_id,
-                                                                       funding_txo: prev_hop.outpoint,
-                                                                       update: monitor_update.clone(),
-                                                               });
                                                }
                                        }
                                        return Ok(());
@@ -5298,7 +5341,7 @@ where
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
-                       if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
@@ -5428,7 +5471,7 @@ where
                        msg: channel.accept_inbound_channel(),
                });
 
-               peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel);
+               peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
 
                Ok(())
        }
@@ -5460,18 +5503,26 @@ where
                peer: &PeerState<SP>, best_block_height: u32
        ) -> usize {
                let mut num_unfunded_channels = 0;
-               for (_, chan) in peer.channel_by_id.iter() {
-                       // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
-                       // which have not yet had any confirmations on-chain.
-                       if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
-                               chan.context.get_funding_tx_confirmations(best_block_height) == 0
-                       {
-                               num_unfunded_channels += 1;
-                       }
-               }
-               for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
-                       if chan.context.minimum_depth().unwrap_or(1) != 0 {
-                               num_unfunded_channels += 1;
+               for (_, phase) in peer.channel_by_id.iter() {
+                       match phase {
+                               ChannelPhase::Funded(chan) => {
+                                       // This covers non-zero-conf inbound `Channel`s that we are currently monitoring but
+                                       // which have not yet had any confirmations on-chain.
+                                       if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+                                               chan.context.get_funding_tx_confirmations(best_block_height) == 0
+                                       {
+                                               num_unfunded_channels += 1;
+                                       }
+                               },
+                               ChannelPhase::UnfundedInboundV1(chan) => {
+                                       if chan.context.minimum_depth().unwrap_or(1) != 0 {
+                                               num_unfunded_channels += 1;
+                                       }
+                               },
+                               ChannelPhase::UnfundedOutboundV1(_) => {
+                                       // Outbound channels don't count against the unfunded-channel DoS limit.
+                                       continue;
+                               }
                        }
                }
                num_unfunded_channels + peer.inbound_channel_request_by_id.len()
@@ -5572,7 +5623,7 @@ where
                        node_id: counterparty_node_id.clone(),
                        msg: channel.accept_inbound_channel(),
                });
-               peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
+               peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
                Ok(())
        }
 
@@ -5586,10 +5637,17 @@ where
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
-                                       (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
+                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                               hash_map::Entry::Occupied(mut phase) => {
+                                       match phase.get_mut() {
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
+                                                       (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+                                               },
+                                               _ => {
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                                               }
+                                       }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
@@ -5618,8 +5676,8 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let (chan, funding_msg, monitor) =
-                       match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
-                               Some(inbound_chan) => {
+                       match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
+                               Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
                                        match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
                                                Ok(res) => res,
                                                Err((mut inbound_chan, err)) => {
@@ -5634,6 +5692,9 @@ where
                                                },
                                        }
                                },
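+                               // `funding_created` is only expected while the channel is an unfunded inbound V1
+                               // channel; any other phase is rejected with an error below.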
+                               Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                               },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        };
 
@@ -5665,22 +5726,25 @@ where
 
                                let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 
-                               let chan = e.insert(chan);
-                               let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
-                                       per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
-                                       { peer_state.channel_by_id.remove(&new_channel_id) });
-
-                               // Note that we reply with the new channel_id in error messages if we gave up on the
-                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                               // any messages referencing a previously-closed channel anyway.
-                               // We do not propagate the monitor update to the user as it would be for a monitor
-                               // that we didn't manage to store (and that we don't care about - we don't respond
-                               // with the funding_signed so the channel can never go on chain).
-                               if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
-                                       res.0 = None;
+                               if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
+                                       let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
+                                               per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
+                                               { peer_state.channel_by_id.remove(&new_channel_id) });
+
+                                       // Note that we reply with the new channel_id in error messages if we gave up on the
+                                       // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                                       // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                                       // any messages referencing a previously-closed channel anyway.
+                                       // We do not propagate the monitor update to the user as it would be for a monitor
+                                       // that we didn't manage to store (and that we don't care about - we don't respond
+                                       // with the funding_signed so the channel can never go on chain).
+                                       if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+                                               res.0 = None;
+                                       }
+                                       res.map(|_| ())
+                               } else {
+                                       unreachable!("This must be a funded channel as we just inserted it.");
                                }
-                               res.map(|_| ())
                        }
                }
        }
@@ -5697,20 +5761,27 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               let monitor = try_chan_entry!(self,
-                                       chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
-                               let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-                               let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
-                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                       // monitor update contained within `shutdown_finish` was applied.
-                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                               shutdown_finish.0.take();
-                                       }
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               match chan_phase_entry.get_mut() {
+                                       ChannelPhase::Funded(ref mut chan) => {
+                                               let monitor = try_chan_phase_entry!(self,
+                                                       chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
+                                               let update_res = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor);
+                                               let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                       // monitor update contained within `shutdown_finish` was applied.
+                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                               shutdown_finish.0.take();
+                                                       }
+                                               }
+                                               res.map(|_| ())
+                                       },
+                                       _ => {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
+                                       },
                                }
-                               res.map(|_| ())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
@@ -5726,38 +5797,45 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
-                                       self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
-                               if let Some(announcement_sigs) = announcement_sigs_opt {
-                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", &chan.get().context.channel_id());
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg: announcement_sigs,
-                                       });
-                               } else if chan.get().context.is_usable() {
-                                       // If we're sending an announcement_signatures, we'll send the (public)
-                                       // channel_update after sending a channel_announcement when we receive our
-                                       // counterparty's announcement_signatures. Thus, we only bother to send a
-                                       // channel_update here if the channel is not public, i.e. we're not sending an
-                                       // announcement_signatures.
-                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", &chan.get().context.channel_id());
-                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
+                                               self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+                                       if let Some(announcement_sigs) = announcement_sigs_opt {
+                                               log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                        node_id: counterparty_node_id.clone(),
-                                                       msg,
+                                                       msg: announcement_sigs,
                                                });
+                                       } else if chan.context.is_usable() {
+                                               // If we're sending an announcement_signatures, we'll send the (public)
+                                               // channel_update after sending a channel_announcement when we receive our
+                                               // counterparty's announcement_signatures. Thus, we only bother to send a
+                                               // channel_update here if the channel is not public, i.e. we're not sending an
+                                               // announcement_signatures.
+                                               log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
+                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg,
+                                                       });
+                                               }
                                        }
-                               }
 
-                               {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                       emit_channel_ready_event!(pending_events, chan.get_mut());
-                               }
+                                       {
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               emit_channel_ready_event!(pending_events, chan);
+                                       }
 
-                               Ok(())
+                                       Ok(())
+                               } else {
+                                       try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
+                               }
                        },
-                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                       hash_map::Entry::Vacant(_) => {
+                               Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                       }
                }
        }
 
@@ -5772,48 +5850,46 @@ where
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
-                       // https://github.com/lightningdevkit/rust-lightning/issues/2422
-                       if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-                               self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
-                               let mut chan = remove_channel!(self, chan_entry);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               return Ok(());
-                       } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-                               self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
-                               let mut chan = remove_channel!(self, chan_entry);
-                               self.finish_force_close_channel(chan.context.force_shutdown(false));
-                               return Ok(());
-                       } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                               if !chan_entry.get().received_shutdown() {
-                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-                                               &msg.channel_id,
-                                               if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
-                               }
+                       if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+                               let phase = chan_phase_entry.get_mut();
+                               match phase {
+                                       ChannelPhase::Funded(chan) => {
+                                               if !chan.received_shutdown() {
+                                                       log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                               msg.channel_id,
+                                                               if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
+                                               }
 
-                               let funding_txo_opt = chan_entry.get().context.get_funding_txo();
-                               let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
-                                       chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
-                               dropped_htlcs = htlcs;
-
-                               if let Some(msg) = shutdown {
-                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                       // here as we don't need the monitor update to complete until we send a
-                                       // `closing_signed`, which we'll delay if we're pending a monitor update.
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                               node_id: *counterparty_node_id,
-                                               msg,
-                                       });
-                               }
+                                               let funding_txo_opt = chan.context.get_funding_txo();
+                                               let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
+                                                       chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
+                                               dropped_htlcs = htlcs;
 
-                               // Update the monitor with the shutdown script if necessary.
-                               if let Some(monitor_update) = monitor_update_opt {
-                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                               peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+                                               if let Some(msg) = shutdown {
+                                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                                       // here as we don't need the monitor update to complete until we send a
+                                                       // `closing_signed`, which we'll delay if we're pending a monitor update.
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: *counterparty_node_id,
+                                                               msg,
+                                                       });
+                                               }
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update_opt {
+                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+                                               }
+                                               break Ok(());
+                                       },
+                                       ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+                                               let context = phase.context_mut();
+                                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+                                               self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+                                               let mut chan = remove_channel_phase!(self, chan_phase_entry);
+                                               self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
+                                               return Ok(());
+                                       },
                                }
-                               break Ok(());
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -5838,22 +5914,27 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-                               hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
-                                       if let Some(msg) = closing_signed {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+                                               if let Some(msg) = closing_signed {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg,
+                                                       });
+                                               }
+                                               if tx.is_some() {
+                                                       // We're done with this channel, we've got a signed closing transaction and
+                                                       // will send the closing_signed back to the remote peer upon return. This
+                                                       // also implies there are no pending HTLCs left on the channel, so we can
+                                                       // fully delete it from tracking (the channel monitor is still around to
+                                                       // watch for old state broadcasts)!
+                                                       (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
+                                               } else { (tx, None) }
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
                                        }
-                                       if tx.is_some() {
-                                               // We're done with this channel, we've got a signed closing transaction and
-                                               // will send the closing_signed back to the remote peer upon return. This
-                                               // also implies there are no pending HTLCs left on the channel, so we can
-                                               // fully delete it from tracking (the channel monitor is still around to
-                                               // watch for old state broadcasts)!
-                                               (tx, Some(remove_channel!(self, chan_entry)))
-                                       } else { (tx, None) }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -5862,7 +5943,7 @@ where
                        log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
                        self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
-               if let Some(chan) = chan_option {
+               if let Some(ChannelPhase::Funded(chan)) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
@@ -5895,37 +5976,41 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-
-                               let pending_forward_info = match decoded_hop_res {
-                                       Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
-                                               self.construct_pending_htlc_status(msg, shared_secret, next_hop,
-                                                       chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt),
-                                       Err(e) => PendingHTLCStatus::Fail(e)
-                               };
-                               let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
-                                       // If the update_add is completely bogus, the call will Err and we will close,
-                                       // but if we've sent a shutdown and they haven't acknowledged it yet, we just
-                                       // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                       match pending_forward_info {
-                                               PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
-                                                       let reason = if (error_code & 0x1000) != 0 {
-                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-                                                               HTLCFailReason::reason(real_code, error_data)
-                                                       } else {
-                                                               HTLCFailReason::from_failure_code(error_code)
-                                                       }.get_encrypted_failure_packet(incoming_shared_secret, &None);
-                                                       let msg = msgs::UpdateFailHTLC {
-                                                               channel_id: msg.channel_id,
-                                                               htlc_id: msg.htlc_id,
-                                                               reason
-                                                       };
-                                                       PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-                                               },
-                                               _ => pending_forward_info
-                                       }
-                               };
-                               try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let pending_forward_info = match decoded_hop_res {
+                                               Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
+                                                       self.construct_pending_htlc_status(msg, shared_secret, next_hop,
+                                                               chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+                                               Err(e) => PendingHTLCStatus::Fail(e)
+                                       };
+                                       let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                               // If the update_add is completely bogus, the call will Err and we will close,
+                                               // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                                               // want to reject the new HTLC and fail it backwards instead of forwarding.
+                                               match pending_forward_info {
+                                                       PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+                                                               let reason = if (error_code & 0x1000) != 0 {
+                                                                       let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                                                       HTLCFailReason::reason(real_code, error_data)
+                                                               } else {
+                                                                       HTLCFailReason::from_failure_code(error_code)
+                                                               }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+                                                               let msg = msgs::UpdateFailHTLC {
+                                                                       channel_id: msg.channel_id,
+                                                                       htlc_id: msg.htlc_id,
+                                                                       reason
+                                                               };
+                                                               PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+                                                       },
+                                                       _ => pending_forward_info
+                                               }
+                                       };
+                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -5944,10 +6029,15 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
-                                       funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded");
-                                       res
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
+                                               funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+                                               res
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
+                                       }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -5966,8 +6056,13 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -5984,12 +6079,17 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if (msg.failure_code & 0x8000) == 0 {
                                        let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
-                                       try_chan_entry!(self, Err(chan_err), chan);
+                                       try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
+                               }
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
                                }
-                               try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
                                Ok(())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
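
Both the `update_fail_malformed_htlc` check above (`failure_code & 0x8000`, i.e. BADONION must be set) and the earlier `update_add_htlc` closure (`error_code & 0x1000`, i.e. UPDATE means a `channel_update` is appended to the failure data) key off BOLT 4 failure-code flag bits. Below is a small, self-contained illustration of those bits; the constant names and the `describe` helper are the editor's, not LDK identifiers.

    // BOLT 4 failure-code flag bits referenced by the hunks above.
    const BADONION: u16 = 0x8000; // onion was malformed/unparseable
    const PERM: u16 = 0x4000;     // permanent failure
    const NODE: u16 = 0x2000;     // failure relates to a node, not a channel
    const UPDATE: u16 = 0x1000;   // an up-to-date channel_update is appended

    fn describe(code: u16) -> Vec<&'static str> {
        let mut flags = Vec::new();
        if code & BADONION != 0 { flags.push("BADONION"); }
        if code & PERM != 0 { flags.push("PERM"); }
        if code & NODE != 0 { flags.push("NODE"); }
        if code & UPDATE != 0 { flags.push("UPDATE"); }
        flags
    }

    fn main() {
        // A failure code without the BADONION bit is invalid in update_fail_malformed_htlc,
        // which is exactly what the `(msg.failure_code & 0x8000) == 0` check rejects.
        assert!(describe(PERM | 2).iter().all(|f| *f != "BADONION"));
        // temporary_channel_failure (UPDATE|7) carries a channel_update in its data,
        // which is why the update_add_htlc path attaches one when 0x1000 is set.
        assert_eq!(describe(UPDATE | 7), vec!["UPDATE"]);
    }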
@@ -6006,13 +6106,18 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               let funding_txo = chan.get().context.get_funding_txo();
-                               let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
-                               if let Some(monitor_update) = monitor_update_opt {
-                                       handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-                                               peer_state, per_peer_state, chan).map(|_| ())
-                               } else { Ok(()) }
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       let funding_txo = chan.context.get_funding_txo();
+                                       let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
+                                       if let Some(monitor_update) = monitor_update_opt {
+                                               handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+                                                       peer_state, per_peer_state, chan_phase_entry).map(|_| ())
+                                       } else { Ok(()) }
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -6149,22 +6254,27 @@ where
                                }).map(|mtx| mtx.lock().unwrap())?;
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       let funding_txo_opt = chan.get().context.get_funding_txo();
-                                       let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
-                                               self.raa_monitor_updates_held(
-                                                       &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
-                                                       *counterparty_node_id)
-                                       } else { false };
-                                       let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
-                                               chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan);
-                                       let res = if let Some(monitor_update) = monitor_update_opt {
-                                               let funding_txo = funding_txo_opt
-                                                       .expect("Funding outpoint must have been set for RAA handling to succeed");
-                                               handle_new_monitor_update!(self, funding_txo, monitor_update,
-                                                       peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
-                                       } else { Ok(()) };
-                                       (htlcs_to_fail, res)
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let funding_txo_opt = chan.context.get_funding_txo();
+                                               let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
+                                                       self.raa_monitor_updates_held(
+                                                               &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+                                                               *counterparty_node_id)
+                                               } else { false };
+                                               let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
+                                                       chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
+                                               let res = if let Some(monitor_update) = monitor_update_opt {
+                                                       let funding_txo = funding_txo_opt
+                                                               .expect("Funding outpoint must have been set for RAA handling to succeed");
+                                                       handle_new_monitor_update!(self, funding_txo, monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
+                                               } else { Ok(()) };
+                                               (htlcs_to_fail, res)
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
+                                       }
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -6183,8 +6293,13 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_phase_entry);
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -6201,20 +6316,25 @@ where
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if !chan.get().context.is_usable() {
-                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
-                               }
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       if !chan.context.is_usable() {
+                                               return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+                                       }
 
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                       msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
-                                               &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
-                                               msg, &self.default_configuration
-                                       ), chan),
-                                       // Note that announcement_signatures fails if the channel cannot be announced,
-                                       // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                       update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
-                               });
+                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                               msg: try_chan_phase_entry!(self, chan.announcement_signatures(
+                                                       &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+                                                       msg, &self.default_configuration
+                                               ), chan_phase_entry),
+                                               // Note that announcement_signatures fails if the channel cannot be announced,
+                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                               update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
+                                       });
+                               } else {
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
+                               }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -6238,23 +6358,28 @@ where
                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(chan_id) {
-                       hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
-                                       if chan.get().context.should_announce() {
-                                               // If the announcement is about a channel of ours which is public, some
-                                               // other peer may simply be forwarding all its gossip to us. Don't provide
-                                               // a scary-looking error message and return Ok instead.
+                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                       if chan.context.get_counterparty_node_id() != *counterparty_node_id {
+                                               if chan.context.should_announce() {
+                                                       // If the announcement is about a channel of ours which is public, some
+                                                       // other peer may simply be forwarding all its gossip to us. Don't provide
+                                                       // a scary-looking error message and return Ok instead.
+                                                       return Ok(NotifyOption::SkipPersist);
+                                               }
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+                                       }
+                                       let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
+                                       let msg_from_node_one = msg.contents.flags & 1 == 0;
+                                       if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersist);
+                                       } else {
+                                               log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
+                                               try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
                                        }
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
-                               }
-                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
-                               let msg_from_node_one = msg.contents.flags & 1 == 0;
-                               if were_node_one == msg_from_node_one {
-                                       return Ok(NotifyOption::SkipPersist);
                                } else {
-                                       log_debug!(self.logger, "Received channel_update for channel {}.", &chan_id);
-                                       try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               "Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
                        hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
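
The `channel_update` hunk filters on direction before applying an update: in gossip, `node_1` is the peer whose serialized public key is lexicographically smaller, bit 0 of `channel_flags` is 0 when the update came from `node_1`, and an update describing our own side carries nothing we need to persist. Below is a minimal sketch of that comparison, using arbitrary 33-byte arrays in place of real secp256k1 public keys.

    // Editor's illustration of the were_node_one / msg_from_node_one check above.
    fn update_is_for_our_side(our_key: &[u8; 33], their_key: &[u8; 33], channel_flags: u8) -> bool {
        // node_1 is the lexicographically lesser serialized pubkey (BOLT 7).
        let we_are_node_one = our_key[..] < their_key[..];
        // Bit 0 of channel_flags is the direction: 0 means the update is from node_1.
        let msg_from_node_one = channel_flags & 1 == 0;
        // If both agree, the update describes our own direction of the channel and
        // the handler above returns NotifyOption::SkipPersist instead of applying it.
        we_are_node_one == msg_from_node_one
    }

    fn main() {
        let ours = [2u8; 33];
        let theirs = [3u8; 33];
        assert!(update_is_for_our_side(&ours, &theirs, 0));  // our side: skip persisting
        assert!(!update_is_for_our_side(&ours, &theirs, 1)); // counterparty's side: apply it
    }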
@@ -6275,39 +6400,44 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       // Currently, we expect all holding cell update_adds to be dropped on peer
-                                       // disconnect, so Channel's reestablish will never hand us any holding cell
-                                       // freed HTLCs to fail backwards. If in the future we no longer drop pending
-                                       // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-                                       let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
-                                               msg, &self.logger, &self.node_signer, self.genesis_hash,
-                                               &self.default_configuration, &*self.best_block.read().unwrap()), chan);
-                                       let mut channel_update = None;
-                                       if let Some(msg) = responses.shutdown_msg {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       } else if chan.get().context.is_usable() {
-                                               // If the channel is in a usable state (ie the channel is not being shut
-                                               // down), send a unicast channel_update to our counterparty to make sure
-                                               // they have the latest channel parameters.
-                                               if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-                                                       channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: chan.get().context.get_counterparty_node_id(),
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               // Currently, we expect all holding cell update_adds to be dropped on peer
+                                               // disconnect, so Channel's reestablish will never hand us any holding cell
+                                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+                                               let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
+                                                       msg, &self.logger, &self.node_signer, self.genesis_hash,
+                                                       &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
+                                               let mut channel_update = None;
+                                               if let Some(msg) = responses.shutdown_msg {
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                               node_id: counterparty_node_id.clone(),
                                                                msg,
                                                        });
+                                               } else if chan.context.is_usable() {
+                                                       // If the channel is in a usable state (ie the channel is not being shut
+                                                       // down), send a unicast channel_update to our counterparty to make sure
+                                                       // they have the latest channel parameters.
+                                                       if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+                                                               channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+                                                                       node_id: chan.context.get_counterparty_node_id(),
+                                                                       msg,
+                                                               });
+                                                       }
                                                }
+                                               let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
+                                               htlc_forwards = self.handle_channel_resumption(
+                                                       &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
+                                                       Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                               if let Some(upd) = channel_update {
+                                                       peer_state.pending_msg_events.push(upd);
+                                               }
+                                               need_lnd_workaround
+                                       } else {
+                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                                       "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
                                        }
-                                       let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take();
-                                       htlc_forwards = self.handle_channel_resumption(
-                                               &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
-                                               Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
-                                       if let Some(upd) = channel_update {
-                                               peer_state.pending_msg_events.push(upd);
-                                       }
-                                       need_lnd_workaround
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
@@ -6361,26 +6491,27 @@ where
                                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                                let peer_state = &mut *peer_state_lock;
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                                                               if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
-                                                                       let mut chan = remove_channel!(self, chan_entry);
-                                                                       failed_channels.push(chan.context.force_shutdown(false));
-                                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: update
+                                                               if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+                                                                       if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
+                                                                               failed_channels.push(chan.context.force_shutdown(false));
+                                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               msg: update
+                                                                                       });
+                                                                               }
+                                                                               let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+                                                                                       ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+                                                                               } else {
+                                                                                       ClosureReason::CommitmentTxConfirmed
+                                                                               };
+                                                                               self.issue_channel_close_events(&chan.context, reason);
+                                                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                                       node_id: chan.context.get_counterparty_node_id(),
+                                                                                       action: msgs::ErrorAction::SendErrorMessage {
+                                                                                               msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                                       },
                                                                                });
                                                                        }
-                                                                       let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-                                                                               ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-                                                                       } else {
-                                                                               ClosureReason::CommitmentTxConfirmed
-                                                                       };
-                                                                       self.issue_channel_close_events(&chan.context, reason);
-                                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                                               node_id: chan.context.get_counterparty_node_id(),
-                                                                               action: msgs::ErrorAction::SendErrorMessage {
-                                                                                       msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
-                                                                               },
-                                                                       });
                                                                }
                                                        }
                                                }
@@ -6426,7 +6557,9 @@ where
                                'chan_loop: loop {
                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                        let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-                                       for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                       for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
+                                               |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
+                                       ) {
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
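
Loops that previously iterated `channel_by_id` directly now go through a `filter_map` so only `ChannelPhase::Funded` entries are visited, as in the hunk above. The toy below shows that iteration shape with a hypothetical two-variant phase enum; the names and fields are the editor's, not LDK's.

    // Iterate (and mutate) only the funded entries of a mixed phase map.
    use std::collections::HashMap;

    enum Phase { Unfunded, Funded { feerate_sat_per_kw: u32 } }

    fn main() {
        let mut channel_by_id: HashMap<u32, Phase> = HashMap::new();
        channel_by_id.insert(1, Phase::Funded { feerate_sat_per_kw: 2500 });
        channel_by_id.insert(2, Phase::Unfunded);

        // Mirrors `channel_by_id.iter_mut().filter_map(...)` above: unfunded
        // phases are skipped once here instead of being matched at every use.
        for (id, feerate) in channel_by_id.iter_mut().filter_map(|(id, phase)| {
            if let Phase::Funded { feerate_sat_per_kw } = phase { Some((id, feerate_sat_per_kw)) } else { None }
        }) {
            *feerate += 253; // e.g. a fee bump applied only to funded channels
            println!("channel {} now at {} sat/kw", id, feerate);
        }
    }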
@@ -6478,38 +6611,43 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|channel_id, chan| {
-                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-                                               Ok((msg_opt, tx_opt)) => {
-                                                       if let Some(msg) = msg_opt {
-                                                               has_update = true;
-                                                               pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                                       node_id: chan.context.get_counterparty_node_id(), msg,
-                                                               });
-                                                       }
-                                                       if let Some(tx) = tx_opt {
-                                                               // We're done with this channel. We got a closing_signed and sent back
-                                                               // a closing_signed with a closing transaction to broadcast.
-                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: update
-                                                                       });
-                                                               }
+                               peer_state.channel_by_id.retain(|channel_id, phase| {
+                                       match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                                               Ok((msg_opt, tx_opt)) => {
+                                                                       if let Some(msg) = msg_opt {
+                                                                               has_update = true;
+                                                                               pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                                                       node_id: chan.context.get_counterparty_node_id(), msg,
+                                                                               });
+                                                                       }
+                                                                       if let Some(tx) = tx_opt {
+                                                                               // We're done with this channel. We got a closing_signed and sent back
+                                                                               // a closing_signed with a closing transaction to broadcast.
+                                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               msg: update
+                                                                                       });
+                                                                               }
 
-                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
+                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
 
-                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-                                                               self.tx_broadcaster.broadcast_transactions(&[&tx]);
-                                                               update_maps_on_chan_removal!(self, &chan.context);
-                                                               false
-                                                       } else { true }
+                                                                               log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                                               self.tx_broadcaster.broadcast_transactions(&[&tx]);
+                                                                               update_maps_on_chan_removal!(self, &chan.context);
+                                                                               false
+                                                                       } else { true }
+                                                               },
+                                                               Err(e) => {
+                                                                       has_update = true;
+                                                                       let (close_channel, res) = convert_chan_phase_err!(self, e, chan, channel_id, FUNDED_CHANNEL);
+                                                                       handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
+                                                                       !close_channel
+                                                               }
+                                                       }
                                                },
-                                               Err(e) => {
-                                                       has_update = true;
-                                                       let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                                       handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
-                                                       !close_channel
-                                               }
+                                               _ => true, // Retain unfunded channels if present.
                                        }
                                });
                        }
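The hunk above folds the cooperative-close pass into a single match over the channel's phase, so only funded channels can negotiate a closing_signed while unfunded entries are left alone. A minimal standalone sketch of that retain-over-an-enum pattern, using hypothetical Phase/FundedChannel types rather than the real LDK ones:

use std::collections::HashMap;

// Hypothetical stand-ins for ChannelPhase / Channel, for illustration only.
enum Phase {
    Funded(FundedChannel),
    UnfundedOutbound,
    UnfundedInbound,
}

struct FundedChannel {
    wants_close: bool,
}

impl FundedChannel {
    // Returns Some(()) when a closing tx is ready and the entry should be dropped.
    fn maybe_propose_closing(&mut self) -> Option<()> {
        if self.wants_close { Some(()) } else { None }
    }
}

fn prune_closed(channels: &mut HashMap<u64, Phase>) {
    channels.retain(|_id, phase| match phase {
        // Only funded channels can negotiate a cooperative close; drop them once done.
        Phase::Funded(chan) => chan.maybe_propose_closing().is_none(),
        // Unfunded phases are always retained by this pass.
        _ => true,
    });
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1, Phase::Funded(FundedChannel { wants_close: true }));
    map.insert(2, Phase::UnfundedOutbound);
    prune_closed(&mut map);
    assert_eq!(map.len(), 1);
}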
@@ -6702,7 +6840,9 @@ where
                for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for chan in peer_state.channel_by_id.values() {
+                       for chan in peer_state.channel_by_id.values().filter_map(
+                               |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+                       ) {
                                for (htlc_source, _) in chan.inflight_htlc_sources() {
                                        if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
                                                inflight_htlcs.process_path(path, self.get_our_node_id());
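Read-only walks like the in-flight HTLC pass above use filter_map to narrow the map's values to the funded variant. A compilable sketch of the idiom on simplified, hypothetical types:

use std::collections::HashMap;

enum Phase { Funded(Channel), Unfunded }
struct Channel { inflight_htlcs: u32 }

// Sum a per-channel counter across funded channels only.
fn count_inflight(channels: &HashMap<u64, Phase>) -> u32 {
    channels
        .values()
        .filter_map(|phase| if let Phase::Funded(chan) = phase { Some(chan) } else { None })
        .map(|chan| chan.inflight_htlcs)
        .sum()
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1, Phase::Funded(Channel { inflight_htlcs: 2 }));
    map.insert(2, Phase::Unfunded);
    assert_eq!(count_inflight(&map), 2);
}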
@@ -6775,24 +6915,26 @@ where
                                        break;
                                }
 
-                               if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
-                                       debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
-                                       if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
-                                               log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
-                                                       &channel_funding_outpoint.to_channel_id());
-                                               if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-                                                       peer_state_lck, peer_state, per_peer_state, chan)
-                                               {
-                                                       errors.push((e, counterparty_node_id));
-                                               }
-                                               if further_update_exists {
-                                                       // If there are more `ChannelMonitorUpdate`s to process, restart at the
-                                                       // top of the loop.
-                                                       continue;
+                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
+                                               if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
+                                                       log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+                                                               channel_funding_outpoint.to_channel_id());
+                                                       if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+                                                               peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
+                                                       {
+                                                               errors.push((e, counterparty_node_id));
+                                                       }
+                                                       if further_update_exists {
+                                                               // If there are more `ChannelMonitorUpdate`s to process, restart at the
+                                                               // top of the loop.
+                                                               continue;
+                                                       }
+                                               } else {
+                                                       log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+                                                               channel_funding_outpoint.to_channel_id());
                                                }
-                                       } else {
-                                               log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
-                                                       &channel_funding_outpoint.to_channel_id());
                                        }
                                }
                        } else {
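The monitor-unblocking path above keeps hold of the occupied map entry (so the update-handling macro can still remove the channel) and only then narrows to the funded variant. A rough sketch of that occupied-entry-plus-variant-check shape, with hypothetical types and a simplified update queue:

use std::collections::hash_map::{Entry, HashMap};

enum Phase { Funded(Channel), Unfunded }
struct Channel { pending_updates: u32 }

impl Channel {
    // Pops one queued update, reporting whether more remain.
    fn unblock_next(&mut self) -> Option<bool> {
        if self.pending_updates == 0 { return None; }
        self.pending_updates -= 1;
        Some(self.pending_updates > 0)
    }
}

fn drain_updates(channels: &mut HashMap<u64, Phase>, id: u64) {
    loop {
        if let Entry::Occupied(mut entry) = channels.entry(id) {
            if let Phase::Funded(chan) = entry.get_mut() {
                match chan.unblock_next() {
                    // More updates queued: restart at the top of the loop.
                    Some(true) => continue,
                    // Last update handled, or nothing was queued.
                    Some(false) | None => break,
                }
            }
        }
        break;
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(7, Phase::Funded(Channel { pending_updates: 3 }));
    drain_updates(&mut map, 7);
}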
@@ -7029,7 +7171,7 @@ where
                for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for chan in peer_state.channel_by_id.values() {
+                       for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
                                if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
                                        res.push((funding_txo.txid, Some(block_hash)));
                                }
@@ -7079,88 +7221,94 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|_, channel| {
-                                       let res = f(channel);
-                                       if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
-                                               for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                                       let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-                                                       timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
-                                               }
-                                               if let Some(channel_ready) = channel_ready_opt {
-                                                       send_channel_ready!(self, pending_msg_events, channel, channel_ready);
-                                                       if channel.context.is_usable() {
-                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", &channel.context.channel_id());
-                                                               if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                                               node_id: channel.context.get_counterparty_node_id(),
-                                                                               msg,
-                                                                       });
+                               peer_state.channel_by_id.retain(|_, phase| {
+                                       match phase {
+                                               // Retain unfunded channels.
+                                               ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
+                                               ChannelPhase::Funded(channel) => {
+                                                       let res = f(channel);
+                                                       if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+                                                               for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
+                                                                       let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+                                                                       timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+                                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
+                                                               }
+                                                               if let Some(channel_ready) = channel_ready_opt {
+                                                                       send_channel_ready!(self, pending_msg_events, channel, channel_ready);
+                                                                       if channel.context.is_usable() {
+                                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
+                                                                               if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                                                                               node_id: channel.context.get_counterparty_node_id(),
+                                                                                               msg,
+                                                                                       });
+                                                                               }
+                                                                       } else {
+                                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
+                                                                       }
                                                                }
-                                                       } else {
-                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", &channel.context.channel_id());
-                                                       }
-                                               }
 
-                                               {
-                                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                                       emit_channel_ready_event!(pending_events, channel);
-                                               }
+                                                               {
+                                                                       let mut pending_events = self.pending_events.lock().unwrap();
+                                                                       emit_channel_ready_event!(pending_events, channel);
+                                                               }
 
-                                               if let Some(announcement_sigs) = announcement_sigs {
-                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", &channel.context.channel_id());
-                                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                               node_id: channel.context.get_counterparty_node_id(),
-                                                               msg: announcement_sigs,
-                                                       });
-                                                       if let Some(height) = height_opt {
-                                                               if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                                                               msg: announcement,
-                                                                               // Note that announcement_signatures fails if the channel cannot be announced,
-                                                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
-                                                                               update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+                                                               if let Some(announcement_sigs) = announcement_sigs {
+                                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
+                                                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                                                                               node_id: channel.context.get_counterparty_node_id(),
+                                                                               msg: announcement_sigs,
                                                                        });
+                                                                       if let Some(height) = height_opt {
+                                                                               if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+                                                                                               msg: announcement,
+                                                                                               // Note that announcement_signatures fails if the channel cannot be announced,
+                                                                                               // so get_channel_update_for_broadcast will never fail by the time we get here.
+                                                                                               update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+                                                                                       });
+                                                                               }
+                                                                       }
                                                                }
+                                                               if channel.is_our_channel_ready() {
+                                                                       if let Some(real_scid) = channel.context.get_short_channel_id() {
+                                                                               // If we sent a 0conf channel_ready, and now have an SCID, we add it
+                                                                               // to the short_to_chan_info map here. Note that we check whether we
+                                                                               // can relay using the real SCID at relay-time (i.e.
+                                                                               // enforce option_scid_alias then), and if the funding tx is ever
+                                                                               // un-confirmed we force-close the channel, ensuring short_to_chan_info
+                                                                               // is always consistent.
+                                                                               let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
+                                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+                                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
+                                                                                       "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+                                                                                       fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+                                                                       }
+                                                               }
+                                                       } else if let Err(reason) = res {
+                                                               update_maps_on_chan_removal!(self, &channel.context);
+                                                               // It looks like our counterparty went on-chain or funding transaction was
+                                                               // reorged out of the main chain. Close the channel.
+                                                               failed_channels.push(channel.context.force_shutdown(true));
+                                                               if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
+                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                               msg: update
+                                                                       });
+                                                               }
+                                                               let reason_message = format!("{}", reason);
+                                                               self.issue_channel_close_events(&channel.context, reason);
+                                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                       node_id: channel.context.get_counterparty_node_id(),
+                                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+                                                                               channel_id: channel.context.channel_id(),
+                                                                               data: reason_message,
+                                                                       } },
+                                                               });
+                                                               return false;
                                                        }
+                                                       true
                                                }
-                                               if channel.is_our_channel_ready() {
-                                                       if let Some(real_scid) = channel.context.get_short_channel_id() {
-                                                               // If we sent a 0conf channel_ready, and now have an SCID, we add it
-                                                               // to the short_to_chan_info map here. Note that we check whether we
-                                                               // can relay using the real SCID at relay-time (i.e.
-                                                               // enforce option_scid_alias then), and if the funding tx is ever
-                                                               // un-confirmed we force-close the channel, ensuring short_to_chan_info
-                                                               // is always consistent.
-                                                               let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
-                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
-                                                                       "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
-                                                                       fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
-                                                       }
-                                               }
-                                       } else if let Err(reason) = res {
-                                               update_maps_on_chan_removal!(self, &channel.context);
-                                               // It looks like our counterparty went on-chain or funding transaction was
-                                               // reorged out of the main chain. Close the channel.
-                                               failed_channels.push(channel.context.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
-                                               let reason_message = format!("{}", reason);
-                                               self.issue_channel_close_events(&channel.context, reason);
-                                               pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: channel.context.get_counterparty_node_id(),
-                                                       action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                               channel_id: channel.context.channel_id(),
-                                                               data: reason_message,
-                                                       } },
-                                               });
-                                               return false;
                                        }
-                                       true
                                });
                        }
                }
@@ -7398,23 +7546,27 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|_, chan| {
-                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                       if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, &chan.context);
-                                               self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-                                               return false;
-                                       }
-                                       true
-                               });
-                               peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
-                                       update_maps_on_chan_removal!(self, &chan.context);
-                                       self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-                                       false
-                               });
-                               peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
-                                       update_maps_on_chan_removal!(self, &chan.context);
-                                       self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+                               peer_state.channel_by_id.retain(|_, phase| {
+                                       let context = match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                                       // We only retain funded channels that are not shutdown.
+                                                       if !chan.is_shutdown() {
+                                                               return true;
+                                                       }
+                                                       &chan.context
+                                               },
+                                               // Unfunded channels will always be removed.
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       &chan.context
+                                               },
+                                               ChannelPhase::UnfundedInboundV1(chan) => {
+                                                       &chan.context
+                                               },
+                                       };
+                                       // Clean up for removal.
+                                       update_maps_on_chan_removal!(self, &context);
+                                       self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
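On disconnect, the retain above borrows a common context out of whichever phase is present, so the map and event cleanup is written once. A sketch of that borrow-the-shared-field match, again on stand-in types rather than LDK's:

use std::collections::HashMap;

struct Context { id: u64 }
struct Funded { context: Context, shut_down: bool }
struct Unfunded { context: Context }

enum Phase {
    Funded(Funded),
    UnfundedOutbound(Unfunded),
    UnfundedInbound(Unfunded),
}

fn prune_on_disconnect(channels: &mut HashMap<u64, Phase>, closed: &mut Vec<u64>) {
    channels.retain(|_, phase| {
        let context = match phase {
            // Funded channels survive a disconnect unless already shut down.
            Phase::Funded(chan) => {
                if !chan.shut_down { return true; }
                &chan.context
            }
            // Unfunded channels are always dropped on disconnect.
            Phase::UnfundedOutbound(chan) | Phase::UnfundedInbound(chan) => &chan.context,
        };
        // Shared cleanup for every removed entry.
        closed.push(context.id);
        false
    });
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1, Phase::Funded(Funded { context: Context { id: 1 }, shut_down: false }));
    map.insert(2, Phase::UnfundedInbound(Unfunded { context: Context { id: 2 } }));
    let mut closed = Vec::new();
    prune_on_disconnect(&mut map, &mut closed);
    assert_eq!(map.len(), 1);
    assert_eq!(closed, vec![2]);
}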
@@ -7501,8 +7653,6 @@ where
                                        }
                                        e.insert(Mutex::new(PeerState {
                                                channel_by_id: HashMap::new(),
-                                               outbound_v1_channel_by_id: HashMap::new(),
-                                               inbound_v1_channel_by_id: HashMap::new(),
                                                inbound_channel_request_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
                                                pending_msg_events: Vec::new(),
@@ -7538,11 +7688,15 @@ where
                        let peer_state = &mut *peer_state_lock;
                        let pending_msg_events = &mut peer_state.pending_msg_events;
 
-                       // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-                       // (so won't be recovered after a crash) we don't need to bother closing unfunded channels and
-                       // clearing their maps here. Instead we can just send queue channel_reestablish messages for
-                       // channels in the channel_by_id map.
-                       peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+                       peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
+                               if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
+                                       // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+                                       // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
+                                       // worry about closing and removing them.
+                                       debug_assert!(false);
+                                       None
+                               }
+                       ).for_each(|chan| {
                                pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
                                        node_id: chan.context.get_counterparty_node_id(),
                                        msg: chan.get_channel_reestablish(&self.logger),
@@ -7572,7 +7726,7 @@ where
                                        let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() { return; }
                                        let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
-                                       if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
+                                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
                                                if let Some(msg) = chan.get_outbound_shutdown() {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                                node_id: *counterparty_node_id,
@@ -7606,9 +7760,7 @@ where
                                // Note that we don't bother generating any events for pre-accept channels -
                                // they're not considered "channels" yet from the PoV of our events interface.
                                peer_state.inbound_channel_request_by_id.clear();
-                               peer_state.channel_by_id.keys().cloned()
-                                       .chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
-                                       .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
+                               peer_state.channel_by_id.keys().cloned().collect()
                        };
                        for channel_id in channel_ids {
                                // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
@@ -7622,7 +7774,7 @@ where
                                if peer_state_mutex_opt.is_none() { return; }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
+                               if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
                                        if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
                                                        node_id: *counterparty_node_id,
@@ -8203,31 +8355,30 @@ where
                let mut serializable_peer_count: u64 = 0;
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       let mut unfunded_channels = 0;
-                       let mut number_of_channels = 0;
+                       let mut number_of_funded_channels = 0;
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if !peer_state.ok_to_remove(false) {
                                        serializable_peer_count += 1;
                                }
-                               number_of_channels += peer_state.channel_by_id.len();
-                               for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if !channel.context.is_funding_initiated() {
-                                               unfunded_channels += 1;
-                                       }
-                               }
+
+                               number_of_funded_channels += peer_state.channel_by_id.iter().filter(
+                                       |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false }
+                               ).count();
                        }
 
-                       ((number_of_channels - unfunded_channels) as u64).write(writer)?;
+                       (number_of_funded_channels as u64).write(writer)?;
 
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
-                               for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if channel.context.is_funding_initiated() {
-                                               channel.write(writer)?;
-                                       }
+                               for channel in peer_state.channel_by_id.iter().filter_map(
+                                       |(_, phase)| if let ChannelPhase::Funded(channel) = phase {
+                                               if channel.context.is_funding_initiated() { Some(channel) } else { None }
+                                       } else { None }
+                               ) {
+                                       channel.write(writer)?;
                                }
                        }
                }
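Serialization above now counts funded channels with the same filter it later writes them with, instead of subtracting an unfunded tally from a grand total. A sketch of keeping the length prefix and the emitted records in lockstep, with hypothetical types:

use std::collections::HashMap;

enum Phase { Funded(Channel), Unfunded }
struct Channel { funding_broadcast: bool, bytes: Vec<u8> }

// Count exactly the channels the second pass will write, so the length prefix
// and the emitted records can never disagree.
fn serialize_funded(channels: &HashMap<u64, Phase>, out: &mut Vec<u8>) {
    let funded: Vec<&Channel> = channels.values()
        .filter_map(|phase| match phase {
            Phase::Funded(chan) if chan.funding_broadcast => Some(chan),
            _ => None,
        })
        .collect();
    out.extend_from_slice(&(funded.len() as u64).to_be_bytes());
    for chan in funded {
        out.extend_from_slice(&chan.bytes);
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1, Phase::Funded(Channel { funding_broadcast: true, bytes: vec![0xaa] }));
    map.insert(2, Phase::Unfunded);
    let mut out = Vec::new();
    serialize_funded(&map, &mut out);
    assert_eq!(out.len(), 8 + 1);
}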
@@ -8340,6 +8491,8 @@ where
                                                session_priv.write(writer)?;
                                        }
                                }
+                               PendingOutboundPayment::AwaitingInvoice { .. } => {},
+                               PendingOutboundPayment::InvoiceReceived { .. } => {},
                                PendingOutboundPayment::Fulfilled { .. } => {},
                                PendingOutboundPayment::Abandoned { .. } => {},
                        }
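The new AwaitingInvoice and InvoiceReceived arms above intentionally write nothing, but the match stays exhaustive so a future variant cannot be skipped silently. A small sketch of that pattern with hypothetical variants:

enum Payment {
    Retryable { session_privs: Vec<[u8; 32]> },
    AwaitingInvoice { expiry: u64 },
    Fulfilled,
    Abandoned,
}

// Exhaustive match (no `_` arm): adding a variant later forces this site to be
// revisited, even though some variants intentionally write nothing.
fn write_payment(payment: &Payment, out: &mut Vec<u8>) {
    match payment {
        Payment::Retryable { session_privs } => {
            for priv_bytes in session_privs {
                out.extend_from_slice(priv_bytes);
            }
        }
        Payment::AwaitingInvoice { .. } => {}
        Payment::Fulfilled => {}
        Payment::Abandoned => {}
    }
}

fn main() {
    let mut out = Vec::new();
    write_payment(&Payment::AwaitingInvoice { expiry: 0 }, &mut out);
    assert!(out.is_empty());
}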
@@ -8611,7 +8764,7 @@ where
 
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut peer_channels: HashMap<PublicKey, HashMap<ChannelId, Channel<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
@@ -8630,8 +8783,22 @@ where
                                         // But if the channel is behind the monitor, close the channel:
                                        log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+                                       if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+                                                       &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+                                       }
+                                       if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
+                                                       &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
+                                       }
+                                       if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
+                                                       &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
+                                       }
+                                       if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
+                                                       &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
+                                       }
                                        let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
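The expanded staleness diagnostics above report only the counters that actually disagree; since commitment transaction numbers count down from a large initial value, a stale ChannelManager shows up as a number greater than the monitor's. A sketch of that field-by-field reporting with hypothetical accessor structs:

struct ManagerView { update_id: u64, holder_commitment_number: u64 }
struct MonitorView { update_id: u64, holder_commitment_number: u64 }

// Report only the counters that actually disagree. Commitment transaction numbers
// count down, so a "stale manager" shows up as a number *greater* than the monitor's.
fn log_staleness(manager: &ManagerView, monitor: &MonitorView, log: &mut Vec<String>) {
    if manager.update_id < monitor.update_id {
        log.push(format!(
            "monitor at update_id {} but manager at update_id {}",
            monitor.update_id, manager.update_id
        ));
    }
    if manager.holder_commitment_number > monitor.holder_commitment_number {
        log.push(format!(
            "monitor at holder commitment number {} but manager at {}",
            monitor.holder_commitment_number, manager.holder_commitment_number
        ));
    }
}

fn main() {
    let mut log = Vec::new();
    log_staleness(
        &ManagerView { update_id: 3, holder_commitment_number: 10 },
        &MonitorView { update_id: 5, holder_commitment_number: 8 },
        &mut log,
    );
    assert_eq!(log.len(), 2);
}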
@@ -8675,14 +8842,14 @@ where
                                        if channel.context.is_funding_initiated() {
                                                id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
                                        }
-                                       match peer_channels.entry(channel.context.get_counterparty_node_id()) {
+                                       match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        let by_id_map = entry.get_mut();
-                                                       by_id_map.insert(channel.context.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        let mut by_id_map = HashMap::new();
-                                                       by_id_map.insert(channel.context.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                        entry.insert(by_id_map);
                                                }
                                        }
@@ -8749,8 +8916,6 @@ where
                let peer_state_from_chans = |channel_by_id| {
                        PeerState {
                                channel_by_id,
-                               outbound_v1_channel_by_id: HashMap::new(),
-                               inbound_v1_channel_by_id: HashMap::new(),
                                inbound_channel_request_by_id: HashMap::new(),
                                latest_features: InitFeatures::empty(),
                                pending_msg_events: Vec::new(),
@@ -8765,7 +8930,7 @@ where
                let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                for _ in 0..peer_count {
                        let peer_pubkey = Readable::read(reader)?;
-                       let peer_chans = peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+                       let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
                        let mut peer_state = peer_state_from_chans(peer_chans);
                        peer_state.latest_features = Readable::read(reader)?;
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -8926,30 +9091,37 @@ where
                for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mtx.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for (_, chan) in peer_state.channel_by_id.iter() {
-                               // Channels that were persisted have to be funded, otherwise they should have been
-                               // discarded.
-                               let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
-                               let monitor = args.channel_monitors.get(&funding_txo)
-                                       .expect("We already checked for monitor presence when loading channels");
-                               let mut max_in_flight_update_id = monitor.get_latest_update_id();
-                               if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
-                                       if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
-                                               max_in_flight_update_id = cmp::max(max_in_flight_update_id,
-                                                       handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
-                                                               funding_txo, monitor, peer_state, ""));
+                       for phase in peer_state.channel_by_id.values() {
+                               if let ChannelPhase::Funded(chan) = phase {
+                                       // Channels that were persisted have to be funded, otherwise they should have been
+                                       // discarded.
+                                       let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                                       let monitor = args.channel_monitors.get(&funding_txo)
+                                               .expect("We already checked for monitor presence when loading channels");
+                                       let mut max_in_flight_update_id = monitor.get_latest_update_id();
+                                       if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
+                                               if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+                                                       max_in_flight_update_id = cmp::max(max_in_flight_update_id,
+                                                               handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
+                                                                       funding_txo, monitor, peer_state, ""));
+                                               }
                                        }
-                               }
-                               if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
-                                       // If the channel is ahead of the monitor, return InvalidValue:
-                                       log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
-                                               &chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
-                                       log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
-                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
+                                               // If the channel is ahead of the monitor, return InvalidValue:
+                                               log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                               log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+                                                       chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
+                                               log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+                                               log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                               log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                               log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                               return Err(DecodeError::InvalidValue);
+                                       }
+                               } else {
+                                       // We shouldn't have persisted (or read) any unfunded channel types so none should have been
+                                       // created in this `channel_by_id` map.
+                                       debug_assert!(false);
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
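Hitting a non-funded phase while reading persisted channels is treated above as a logic error: loud in debug builds via debug_assert!(false), a clean InvalidValue failure in release. A sketch of that defensive pattern on stand-in types:

#[derive(Debug, PartialEq)]
enum DecodeError { InvalidValue }

enum Phase { Funded(u64), Unfunded }

// Unfunded phases are never persisted, so seeing one here is a bug: panic in
// debug builds, fail the read gracefully in release builds.
fn check_loaded(phases: &[Phase]) -> Result<(), DecodeError> {
    for phase in phases {
        match phase {
            Phase::Funded(_) => {}
            Phase::Unfunded => {
                debug_assert!(false, "unfunded channels are never serialized");
                return Err(DecodeError::InvalidValue);
            }
        }
    }
    Ok(())
}

fn main() {
    assert_eq!(check_loaded(&[Phase::Funded(1)]), Ok(()));
}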
@@ -9221,28 +9393,35 @@ where
                for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
-                               if chan.context.outbound_scid_alias() == 0 {
-                                       let mut outbound_scid_alias;
-                                       loop {
-                                               outbound_scid_alias = fake_scid::Namespace::OutboundAlias
-                                                       .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
-                                               if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
-                                       }
-                                       chan.context.set_outbound_scid_alias(outbound_scid_alias);
-                               } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
-                                       // Note that in rare cases its possible to hit this while reading an older
-                                       // channel if we just happened to pick a colliding outbound alias above.
-                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
-                                       return Err(DecodeError::InvalidValue);
-                               }
-                               if chan.context.is_usable() {
-                                       if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
+                       for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
+                               if let ChannelPhase::Funded(chan) = phase {
+                                       if chan.context.outbound_scid_alias() == 0 {
+                                               let mut outbound_scid_alias;
+                                               loop {
+                                                       outbound_scid_alias = fake_scid::Namespace::OutboundAlias
+                                                               .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
+                                                       if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
+                                               }
+                                               chan.context.set_outbound_scid_alias(outbound_scid_alias);
+                                       } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
                                                // Note that in rare cases it's possible to hit this while reading an older
                                                // channel if we just happened to pick a colliding outbound alias above.
                                                log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                return Err(DecodeError::InvalidValue);
                                        }
+                                       if chan.context.is_usable() {
+                                               if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
+                                                       // Note that in rare cases it's possible to hit this while reading an older
+                                                       // channel if we just happened to pick a colliding outbound alias above.
+                                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+                                                       return Err(DecodeError::InvalidValue);
+                                               }
+                                       }
+                               } else {
+                               // We shouldn't have persisted (or read) any unfunded channel types, so none should have been
+                               // created in this `channel_by_id` map.
+                                       debug_assert!(false);
+                                       return Err(DecodeError::InvalidValue);
                                }
                        }
                }
@@ -9284,7 +9463,7 @@ where
                                                        let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
-                                                       if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+                                                       if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
                                                                channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
                                                        }
                                                }
@@ -9648,10 +9827,9 @@ mod tests {
                let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
 
                // Next, attempt a keysend payment and make sure it fails.
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
-                       final_value_msat: 100_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
+                       TEST_FINAL_CLTV, false), 100_000);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
                        None, nodes[0].logger, &scorer, &(), &random_seed_bytes
@@ -9739,10 +9917,10 @@ mod tests {
                pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
 
                // Next, attempt a keysend payment and make sure it fails.
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
-                       final_value_msat: 100_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+                       100_000
+               );
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
                        None, nodes[0].logger, &scorer, &(), &random_seed_bytes
@@ -9788,10 +9966,8 @@ mod tests {
                let payee_pubkey = nodes[1].node.get_our_node_id();
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-                       final_value_msat: 10_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
                let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let scorer = test_utils::TestScorer::new();
@@ -9835,10 +10011,8 @@ mod tests {
                let payee_pubkey = nodes[1].node.get_our_node_id();
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-                       final_value_msat: 10_000,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
                let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let scorer = test_utils::TestScorer::new();
@@ -10735,9 +10909,9 @@ pub mod bench {
                                let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
 
                                $node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
-                                       PaymentId(payment_hash.0), RouteParameters {
-                                               payment_params, final_value_msat: 10_000,
-                                       }, Retry::Attempts(0)).unwrap();
+                                       PaymentId(payment_hash.0),
+                                       RouteParameters::from_payment_params_and_value(payment_params, 10_000),
+                                       Retry::Attempts(0)).unwrap();
                                let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
                                $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
                                $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
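The hunks above repeatedly replace a `RouteParameters { payment_params, final_value_msat }` struct literal with the new `RouteParameters::from_payment_params_and_value` constructor. A minimal sketch of the new call shape, assuming only the constructor and the `PaymentParameters::for_keysend` signature visible in this diff (the payee key, the CLTV delta of 40, and the 10_000 msat amount are placeholders):

use bitcoin::secp256k1::PublicKey;
use lightning::routing::router::{PaymentParameters, RouteParameters};

fn keysend_route_params(payee: PublicKey) -> RouteParameters {
        // Build the payment parameters first, then combine them with the amount
        // via the new constructor instead of filling in a struct literal.
        let payment_params = PaymentParameters::for_keysend(payee, 40, false);
        RouteParameters::from_payment_params_and_value(payment_params, 10_000)
}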
index 1992b8db9e565b7696e60bc286581583ca8beed7..bcaa91ab266fe7f10ebbc8de710fc30d927365c3 100644 (file)
@@ -798,6 +798,35 @@ impl<T: sealed::Context> Features<T> {
                true
        }
 
+       /// Sets a required feature bit. Errors if `bit` is outside the feature range as defined
+       /// by [BOLT 9].
+       ///
+       /// Note: Required bits are even. If an odd bit is given, then the corresponding even bit will
+       /// be set instead (i.e., `bit - 1`).
+       ///
+       /// [BOLT 9]: https://github.com/lightning/bolts/blob/master/09-features.md
+       pub fn set_required_feature_bit(&mut self, bit: usize) -> Result<(), ()> {
+               self.set_feature_bit(bit - (bit % 2))
+       }
+
+       /// Sets an optional feature bit. Errors if `bit` is outside the feature range as defined
+       /// by [BOLT 9].
+       ///
+       /// Note: Optional bits are odd. If an even bit is given, then the corresponding odd bit will be
+       /// set instead (i.e., `bit + 1`).
+       ///
+       /// [BOLT 9]: https://github.com/lightning/bolts/blob/master/09-features.md
+       pub fn set_optional_feature_bit(&mut self, bit: usize) -> Result<(), ()> {
+               self.set_feature_bit(bit + (1 - (bit % 2)))
+       }
+
+       fn set_feature_bit(&mut self, bit: usize) -> Result<(), ()> {
+               if bit > 255 {
+                       return Err(());
+               }
+               self.set_bit(bit, false)
+       }
+
        /// Sets a required custom feature bit. Errors if `bit` is outside the custom range as defined
        /// by [bLIP 2] or if it is a known `T` feature.
        ///
@@ -824,10 +853,13 @@ impl<T: sealed::Context> Features<T> {
                if bit < 256 {
                        return Err(());
                }
+               self.set_bit(bit, true)
+       }
 
+       fn set_bit(&mut self, bit: usize, custom: bool) -> Result<(), ()> {
                let byte_offset = bit / 8;
                let mask = 1 << (bit - 8 * byte_offset);
-               if byte_offset < T::KNOWN_FEATURE_MASK.len() {
+               if byte_offset < T::KNOWN_FEATURE_MASK.len() && custom {
                        if (T::KNOWN_FEATURE_MASK[byte_offset] & mask) != 0 {
                                return Err(());
                        }
@@ -1078,6 +1110,13 @@ mod tests {
                assert!(!features.requires_basic_mpp());
                assert!(features.requires_payment_secret());
                assert!(features.supports_payment_secret());
+
+               // Set flags manually
+               let mut features = NodeFeatures::empty();
+               assert!(features.set_optional_feature_bit(55).is_ok());
+               assert!(features.supports_keysend());
+               assert!(features.set_optional_feature_bit(255).is_ok());
+               assert!(features.set_required_feature_bit(256).is_err());
        }
 
        #[test]
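The new `set_optional_feature_bit` / `set_required_feature_bit` helpers added above normalize the requested bit to its odd (optional) or even (required) position before setting it, and reject anything past bit 255. A short usage sketch mirroring the assertions in the test above (bit 55 is the optional keysend bit):

use lightning::ln::features::NodeFeatures;

fn feature_bit_demo() {
        let mut features = NodeFeatures::empty();
        // Setting the odd bit 55 marks keysend as optionally supported.
        features.set_optional_feature_bit(55).unwrap();
        assert!(features.supports_keysend());
        // Bits beyond 255 fall outside the non-custom feature range and error.
        assert!(features.set_required_feature_bit(256).is_err());
}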
index 80b910159523ed84c5af1b6da2ec83bee7012125..db43c37fd5eb5b8d1dd299c22f836bf017db9f42 100644 (file)
@@ -19,11 +19,11 @@ use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, Walle
 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
 use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
-use crate::routing::router::{self, PaymentParameters, Route};
+use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
 use crate::ln::features::InitFeatures;
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::scid_utils;
 use crate::util::test_utils;
 use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
@@ -46,7 +46,7 @@ use alloc::rc::Rc;
 use crate::sync::{Arc, Mutex, LockTestExt, RwLock};
 use core::mem;
 use core::iter::repeat;
-use bitcoin::{PackedLockTime, TxMerkleNode};
+use bitcoin::{PackedLockTime, TxIn, TxMerkleNode};
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
@@ -520,7 +520,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
-                                       let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+                                       let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                                &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap();
                                        deserialized_monitors.push(deserialized_monitor);
                                }
@@ -811,36 +811,14 @@ macro_rules! get_channel_ref {
        }
 }
 
-#[cfg(test)]
-macro_rules! get_outbound_v1_channel_ref {
-       ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
-               {
-                       $per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
-                       $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-                       $peer_state_lock.outbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
-               }
-       }
-}
-
-#[cfg(test)]
-macro_rules! get_inbound_v1_channel_ref {
-       ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
-               {
-                       $per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
-                       $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-                       $peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
-               }
-       }
-}
-
 #[cfg(test)]
 macro_rules! get_feerate {
        ($node: expr, $counterparty_node: expr, $channel_id: expr) => {
                {
                        let mut per_peer_state_lock;
                        let mut peer_state_lock;
-                       let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-                       chan.context.get_feerate_sat_per_1000_weight()
+                       let phase = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
+                       phase.context().get_feerate_sat_per_1000_weight()
                }
        }
 }
@@ -852,7 +830,7 @@ macro_rules! get_channel_type_features {
                        let mut per_peer_state_lock;
                        let mut peer_state_lock;
                        let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-                       chan.context.get_channel_type().clone()
+                       chan.context().get_channel_type().clone()
                }
        }
 }
@@ -952,7 +930,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
        let mut monitors_read = Vec::with_capacity(monitors_encoded.len());
        for encoded in monitors_encoded {
                let mut monitor_read = &encoded[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>
                        ::read(&mut monitor_read, (node.keys_manager, node.keys_manager)).unwrap();
                assert!(monitor_read.is_empty());
                monitors_read.push(monitor);
@@ -1005,7 +983,23 @@ macro_rules! reload_node {
        };
 }
 
-pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> (ChannelId, Transaction, OutPoint) {
+pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>,
+       expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128)
+ -> (ChannelId, Transaction, OutPoint)
+{
+       internal_create_funding_transaction(node, expected_counterparty_node_id, expected_chan_value, expected_user_chan_id, false)
+}
+
+pub fn create_coinbase_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>,
+       expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128)
+ -> (ChannelId, Transaction, OutPoint)
+{
+       internal_create_funding_transaction(node, expected_counterparty_node_id, expected_chan_value, expected_user_chan_id, true)
+}
+
+fn internal_create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>,
+       expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128,
+       coinbase: bool) -> (ChannelId, Transaction, OutPoint) {
        let chan_id = *node.network_chan_count.borrow();
 
        let events = node.node.get_and_clear_pending_events();
@@ -1016,7 +1010,16 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_
                        assert_eq!(*channel_value_satoshis, expected_chan_value);
                        assert_eq!(user_channel_id, expected_user_chan_id);
 
-                       let tx = Transaction { version: chan_id as i32, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       let input = if coinbase {
+                               vec![TxIn {
+                                       previous_output: bitcoin::OutPoint::null(),
+                                       ..Default::default()
+                               }]
+                       } else {
+                               Vec::new()
+                       };
+
+                       let tx = Transaction { version: chan_id as i32, lock_time: PackedLockTime::ZERO, input, output: vec![TxOut {
                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                        }]};
                        let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
@@ -1025,6 +1028,7 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_
                _ => panic!("Unexpected event"),
        }
 }
+
 pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId) -> Transaction {
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42);
        assert_eq!(temporary_channel_id, expected_temporary_channel_id);
@@ -1838,14 +1842,14 @@ macro_rules! get_payment_preimage_hash {
 }
 
 /// Gets a route from the given sender to the node described in `payment_params`.
-pub fn get_route(send_node: &Node, payment_params: &PaymentParameters, recv_value: u64) -> Result<Route, msgs::LightningError> {
+pub fn get_route(send_node: &Node, route_params: &RouteParameters) -> Result<Route, msgs::LightningError> {
        let scorer = TestScorer::new();
        let keys_manager = TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
        router::get_route(
-               &send_node.node.get_our_node_id(), payment_params, &send_node.network_graph.read_only(),
+               &send_node.node.get_our_node_id(), route_params, &send_node.network_graph.read_only(),
                Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-               recv_value, send_node.logger, &scorer, &(), &random_seed_bytes
+               send_node.logger, &scorer, &(), &random_seed_bytes
        )
 }
 
@@ -1854,9 +1858,10 @@ pub fn get_route(send_node: &Node, payment_params: &PaymentParameters, recv_valu
 /// Don't use this, use the identically-named function instead.
 #[macro_export]
 macro_rules! get_route {
-       ($send_node: expr, $payment_params: expr, $recv_value: expr) => {
-               $crate::ln::functional_test_utils::get_route(&$send_node, &$payment_params, $recv_value)
-       }
+       ($send_node: expr, $payment_params: expr, $recv_value: expr) => {{
+               let route_params = $crate::routing::router::RouteParameters::from_payment_params_and_value($payment_params, $recv_value);
+               $crate::ln::functional_test_utils::get_route(&$send_node, &route_params)
+       }}
 }
 
 #[cfg(test)]
@@ -1868,9 +1873,10 @@ macro_rules! get_route_and_payment_hash {
                $crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value)
        }};
        ($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr) => {{
+               let route_params = $crate::routing::router::RouteParameters::from_payment_params_and_value($payment_params, $recv_value);
                let (payment_preimage, payment_hash, payment_secret) =
                        $crate::ln::functional_test_utils::get_payment_preimage_hash(&$recv_node, Some($recv_value), None);
-               let route = $crate::ln::functional_test_utils::get_route(&$send_node, &$payment_params, $recv_value);
+               let route = $crate::ln::functional_test_utils::get_route(&$send_node, &route_params);
                (route.unwrap(), payment_hash, payment_preimage, payment_secret)
        }}
 }
@@ -2386,10 +2392,10 @@ pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, '
                                                let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
                                                        .unwrap().lock().unwrap();
                                                let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
-                                               if let Some(prev_config) = channel.context.prev_config() {
+                                               if let Some(prev_config) = channel.context().prev_config() {
                                                        prev_config.forwarding_fee_base_msat
                                                } else {
-                                                       channel.context.config().forwarding_fee_base_msat
+                                                       channel.context().config().forwarding_fee_base_msat
                                                }
                                        };
                                        if $idx == 1 { fee += expected_extra_fees[i]; }
@@ -2464,7 +2470,8 @@ pub const TEST_FINAL_CLTV: u32 = 70;
 pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
        let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(expected_route.last().unwrap().node.invoice_features()).unwrap();
-       let route = get_route(origin_node, &payment_params, recv_value).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
+       let route = get_route(origin_node, &route_params).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
@@ -2478,14 +2485,14 @@ pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route:
 pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64)  {
        let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(expected_route.last().unwrap().node.invoice_features()).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
        let network_graph = origin_node.network_graph.read_only();
        let scorer = test_utils::TestScorer::new();
        let seed = [0u8; 32];
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
-       let route = router::get_route(
-               &origin_node.node.get_our_node_id(), &payment_params, &network_graph,
-               None, recv_value, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route = router::get_route(&origin_node.node.get_our_node_id(), &route_params, &network_graph,
+               None, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
@@ -2646,7 +2653,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMon
        create_node_cfgs_with_persisters(node_count, chanmon_cfgs, chanmon_cfgs.iter().map(|c| &c.persister).collect())
 }
 
-pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<EnforcingSigner>>) -> Vec<NodeCfg<'a>> {
+pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<TestChannelSigner>>) -> Vec<NodeCfg<'a>> {
        let mut nodes = Vec::new();
 
        for i in 0..node_count {
@@ -2912,7 +2919,9 @@ macro_rules! get_channel_value_stat {
        ($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
                let peer_state_lock = $node.node.per_peer_state.read().unwrap();
                let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-               let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap();
+               let chan = chan_lock.channel_by_id.get(&$channel_id).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                chan.get_value_stat()
        }}
 }
index 506cb73d9baa28b1f0341b31dea81a6e61678d4e..94c0a5a8ebd9456f0db7a0de9a00d04f2a5f0472 100644 (file)
@@ -20,17 +20,17 @@ use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
-use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
+use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils::{self, WatchtowerPersister};
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -181,14 +181,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               if send_from_initiator {
-                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-                       chan.context.holder_selected_channel_reserve_satoshis = 0;
-                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
-               } else {
-                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-                       chan.context.holder_selected_channel_reserve_satoshis = 0;
-                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+
+               let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+               match channel_phase {
+                       ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+                               let chan_context = channel_phase.context_mut();
+                               chan_context.holder_selected_channel_reserve_satoshis = 0;
+                               chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+                       },
+                       ChannelPhase::Funded(_) => assert!(false),
                }
        }
 
@@ -696,12 +697,14 @@ fn test_update_fee_that_funder_cannot_afford() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
@@ -710,7 +713,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -725,7 +730,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
@@ -1049,7 +1056,9 @@ fn fake_network_test() {
        });
        hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
        hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
-       let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
+       let payment_preimage_1 = send_along_route(&nodes[1],
+               Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
+                       &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
 
        let mut hops = Vec::with_capacity(3);
        hops.push(RouteHop {
@@ -1078,7 +1087,9 @@ fn fake_network_test() {
        });
        hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
        hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
-       let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![Path { hops, blinded_tail: None }], payment_params: None }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
+       let payment_hash_2 = send_along_route(&nodes[1],
+               Route { paths: vec![Path { hops, blinded_tail: None }], route_params: None },
+                       &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
 
        // Claim the rebalances...
        fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
@@ -1409,12 +1420,14 @@ fn test_fee_spike_violation_fails_htlc() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                // Make the signer believe we validated another commitment, so we can release the secret
                chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
@@ -1428,7 +1441,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -1457,7 +1472,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        commitment_number,
@@ -3203,7 +3220,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
                // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
                nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
-                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
+                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
        } else { 3000000 };
 
        let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
@@ -5101,7 +5118,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
        // 0th HTLC:
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
@@ -6211,7 +6228,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
        let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
 
        // Fetch a route in advance as we will be unable to once we're unable to send.
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -6284,7 +6301,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-               htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
+               htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
        }
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@@ -6887,7 +6904,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let chan =create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
        // We route 2 dust-HTLCs between A and B
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
@@ -6980,7 +6997,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
        let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -7137,8 +7154,11 @@ fn test_check_htlc_underpaying() {
 
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
-       let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(),
+               TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
+               None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
        let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
        let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@ -7394,12 +7414,14 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
-               3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
+               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
        let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
        let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
-       let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
-               3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
+       let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
+               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
        send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
@@ -7639,7 +7661,7 @@ fn test_counterparty_raa_skip_no_crash() {
        // commitment transaction, we would have happily carried on and provided them the next
        // commitment transaction based on one RAA forward. This would probably eventually have led to
        // channel closure, but it would not have resulted in funds loss. Still, our
-       // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
+       // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
        // check simply that the channel is closed in response to such an RAA, but don't check whether
        // we decide to punish our counterparty for revoking their funds (as we don't currently
        // implement that).
@@ -7654,7 +7676,9 @@ fn test_counterparty_raa_skip_no_crash() {
        {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
+               let keys = guard.channel_by_id.get_mut(&channel_id).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap().get_signer();
 
                const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
@@ -8350,7 +8374,7 @@ fn test_update_err_monitor_lockdown() {
        let watchtower = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8376,11 +8400,14 @@ fn test_update_err_monitor_lockdown() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
        // Our local monitor is in-sync and hasn't processed yet timeout
        check_added_monitors!(nodes[0], 1);
@@ -8420,7 +8447,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_alice = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8451,7 +8478,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_bob = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8474,13 +8501,16 @@ fn test_concurrent_monitor_claim() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Watchtower Alice should already have seen the block and reject the update
-                       assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-                       assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               // Watchtower Alice should already have seen the block and reject the update
+                               assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+                               assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
        // Our local monitor is in-sync and hasn't processed yet timeout
        check_added_monitors!(nodes[0], 1);
@@ -8931,9 +8961,13 @@ fn test_duplicate_chan_id() {
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
-               let logger = test_utils::TestLogger::new();
-               as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                               let logger = test_utils::TestLogger::new();
+                               chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+                       },
+                       _ => panic!("Unexpected ChannelPhase variant"),
+               }
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -9133,6 +9167,66 @@ fn test_invalid_funding_tx() {
        mine_transaction(&nodes[1], &spend_tx);
 }
 
+#[test]
+fn test_coinbase_funding_tx() {
+       // Miners are able to fund channels directly from coinbase transactions; however,
+       // by consensus rules, outputs of a coinbase transaction are encumbered by a
+       // 100-block maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
+       // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
+       //
+       // Note that 0conf channels with coinbase funding transactions are unaffected and are
+       // immediately operational after opening.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
+       let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+
+       // Create the coinbase funding transaction.
+       let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+       check_added_monitors!(nodes[0], 0);
+       let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+       // Starting at height 0, we "confirm" the coinbase at height 1.
+       confirm_transaction_at(&nodes[0], &tx, 1);
+       // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
+       connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
+       // Check that we have no pending message events (we have not queued a `channel_ready` yet).
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       // Now connect one more block which results in 100 confirmations of the coinbase transaction.
+       connect_blocks(&nodes[0], 1);
+       // There should now be a `channel_ready` which can be handled.
+       let _ = &nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
+
+       confirm_transaction_at(&nodes[1], &tx, 1);
+       connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       connect_blocks(&nodes[1], 1);
+       expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+       create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+}
+
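The rule this test exercises can be stated compactly. The sketch below is illustrative only, assuming the 100-block maturity described in the comment; `minimum_depth_for_funding` and its parameters are hypothetical names for this sketch, not LDK APIs, and the real logic lives in the channel state machine.

    // Minimal sketch: how a funding transaction's coinbase status and the
    // 0conf setting could map to the required confirmation depth.
    const COINBASE_MATURITY: u32 = 100;

    fn minimum_depth_for_funding(is_coinbase: bool, is_0conf: bool, configured_depth: u32) -> u32 {
        if is_0conf {
            // 0conf channels are usable immediately, coinbase-funded or not.
            0
        } else if is_coinbase {
            // Non-0conf coinbase-funded channels must wait out the maturity timelock.
            configured_depth.max(COINBASE_MATURITY)
        } else {
            configured_depth
        }
    }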
 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
        // In the first version of the chain::Confirm interface, after a refactor was made to not
        // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
@@ -9540,8 +9634,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
-               chan.context.holder_dust_limit_satoshis = 546;
+               match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
+                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                               chan.context.holder_dust_limit_satoshis = 546;
+                       },
+                       _ => panic!("Unexpected ChannelPhase variant"),
+               }
        }
 
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@@ -9565,8 +9663,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               (chan.context.get_dust_buffer_feerate(None) as u64,
-               chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+               (chan.context().get_dust_buffer_feerate(None) as u64,
+               chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
@@ -10025,7 +10123,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
        let check_outbound_channel_existence = |should_exist: bool| {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+               assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
        };
 
        // Channel should exist without any timer ticks.
@@ -10076,7 +10174,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
        let check_inbound_channel_existence = |should_exist: bool| {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+               assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
        };
 
        // Channel should exist without any timer ticks.
index dda7cc2b29a125d214e022b18511e4cfc78fd031..f9e10880afbe5f5eae122d48220f5b8e5dcd9757 100644 (file)
@@ -19,7 +19,7 @@ use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::msgs;
 use crate::ln::msgs::MAX_VALUE_MSAT;
 use crate::util::chacha20::ChaCha20;
-use crate::util::crypto::hkdf_extract_expand_4x;
+use crate::util::crypto::hkdf_extract_expand_5x;
 use crate::util::errors::APIError;
 use crate::util::logger::Logger;
 
@@ -50,6 +50,8 @@ pub struct ExpandedKey {
        user_pmt_hash_key: [u8; 32],
        /// The base key used to derive signing keys and authenticate messages for BOLT 12 Offers.
        offers_base_key: [u8; 32],
+       /// The key used to encrypt message metadata for BOLT 12 Offers.
+       offers_encryption_key: [u8; 32],
 }
 
 impl ExpandedKey {
@@ -57,20 +59,25 @@ impl ExpandedKey {
        ///
        /// It is recommended to cache this value and not regenerate it for each new inbound payment.
        pub fn new(key_material: &KeyMaterial) -> ExpandedKey {
-               let (metadata_key, ldk_pmt_hash_key, user_pmt_hash_key, offers_base_key) =
-                       hkdf_extract_expand_4x(b"LDK Inbound Payment Key Expansion", &key_material.0);
+               let (
+                       metadata_key,
+                       ldk_pmt_hash_key,
+                       user_pmt_hash_key,
+                       offers_base_key,
+                       offers_encryption_key,
+               ) = hkdf_extract_expand_5x(b"LDK Inbound Payment Key Expansion", &key_material.0);
                Self {
                        metadata_key,
                        ldk_pmt_hash_key,
                        user_pmt_hash_key,
                        offers_base_key,
+                       offers_encryption_key,
                }
        }
 
        /// Returns an [`HmacEngine`] used to construct [`Offer::metadata`].
        ///
        /// [`Offer::metadata`]: crate::offers::offer::Offer::metadata
-       #[allow(unused)]
        pub(crate) fn hmac_for_offer(
                &self, nonce: Nonce, iv_bytes: &[u8; IV_LEN]
        ) -> HmacEngine<Sha256> {
@@ -79,6 +86,13 @@ impl ExpandedKey {
                hmac.input(&nonce.0);
                hmac
        }
+
+       /// Encrypts or decrypts the given `bytes`. Used for data included in an offer message's
+       /// metadata (e.g., payment id).
+       pub(crate) fn crypt_for_offer(&self, mut bytes: [u8; 32], nonce: Nonce) -> [u8; 32] {
+               ChaCha20::encrypt_single_block_in_place(&self.offers_encryption_key, &nonce.0, &mut bytes);
+               bytes
+       }
 }
 
 /// A 128-bit number used only once.
@@ -88,7 +102,6 @@ impl ExpandedKey {
 ///
 /// [`Offer::metadata`]: crate::offers::offer::Offer::metadata
 /// [`Offer::signing_pubkey`]: crate::offers::offer::Offer::signing_pubkey
-#[allow(unused)]
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub(crate) struct Nonce(pub(crate) [u8; Self::LENGTH]);
 
@@ -271,10 +284,9 @@ fn construct_payment_secret(iv_bytes: &[u8; IV_LEN], metadata_bytes: &[u8; METAD
        let (iv_slice, encrypted_metadata_slice) = payment_secret_bytes.split_at_mut(IV_LEN);
        iv_slice.copy_from_slice(iv_bytes);
 
-       let chacha_block = ChaCha20::get_single_block(metadata_key, iv_bytes);
-       for i in 0..METADATA_LEN {
-               encrypted_metadata_slice[i] = chacha_block[i] ^ metadata_bytes[i];
-       }
+       ChaCha20::encrypt_single_block(
+               metadata_key, iv_bytes, encrypted_metadata_slice, metadata_bytes
+       );
        PaymentSecret(payment_secret_bytes)
 }
 
@@ -406,11 +418,10 @@ fn decrypt_metadata(payment_secret: PaymentSecret, keys: &ExpandedKey) -> ([u8;
        let (iv_slice, encrypted_metadata_bytes) = payment_secret.0.split_at(IV_LEN);
        iv_bytes.copy_from_slice(iv_slice);
 
-       let chacha_block = ChaCha20::get_single_block(&keys.metadata_key, &iv_bytes);
        let mut metadata_bytes: [u8; METADATA_LEN] = [0; METADATA_LEN];
-       for i in 0..METADATA_LEN {
-               metadata_bytes[i] = chacha_block[i] ^ encrypted_metadata_bytes[i];
-       }
+       ChaCha20::encrypt_single_block(
+               &keys.metadata_key, &iv_bytes, &mut metadata_bytes, encrypted_metadata_bytes
+       );
 
        (iv_bytes, metadata_bytes)
 }
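Both `construct_payment_secret` and `decrypt_metadata` (and the new `crypt_for_offer`) now share one `encrypt_single_block` helper because a stream cipher XORs a keystream into the data, and XOR undoes itself when applied a second time with the same key and IV. A minimal sketch of that symmetry, using a stand-in keystream rather than ChaCha20:

    // The keystream here is a toy (NOT ChaCha20); only the encrypt == decrypt
    // symmetry of XOR-stream ciphers is being illustrated.
    fn xor_keystream(key: &[u8; 32], iv: &[u8; 16], data: &mut [u8; 32]) {
        for (i, byte) in data.iter_mut().enumerate() {
            *byte ^= key[i] ^ iv[i % iv.len()];
        }
    }

    fn main() {
        let key = [7u8; 32];
        let iv = [3u8; 16];
        let mut metadata = [42u8; 32];
        xor_keystream(&key, &iv, &mut metadata); // "encrypt"
        xor_keystream(&key, &iv, &mut metadata); // "decrypt"
        assert_eq!(metadata, [42u8; 32]);
    }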
index 89ded2168bcc33bd8d4c1622dab6986231967671..6bd5ec3f7293fcac5c043bf3437e47eb90127d9d 100644 (file)
@@ -37,14 +37,17 @@ use crate::ln::onion_utils;
 use crate::onion_message;
 
 use crate::prelude::*;
+use core::convert::TryFrom;
 use core::fmt;
 use core::fmt::Debug;
+use core::str::FromStr;
 use crate::io::{self, Read};
 use crate::io_extras::read_to_end;
 
 use crate::events::{MessageSendEventsProvider, OnionMessageProvider};
 use crate::util::logger;
 use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
+use crate::util::base32;
 
 use crate::routing::gossip::{NodeAlias, NodeId};
 
@@ -98,7 +101,7 @@ pub struct Init {
        /// message. A node can decide to use that information to discover a potential update to its
        /// public IPv4 address (NAT) and use that for a [`NodeAnnouncement`] update message containing
        /// the new address.
-       pub remote_network_address: Option<NetAddress>,
+       pub remote_network_address: Option<SocketAddress>,
 }
 
 /// An [`error`] message to be sent to or received from a peer.
@@ -746,16 +749,16 @@ pub struct AnnouncementSignatures {
 
 /// An address which can be used to connect to a remote peer.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub enum NetAddress {
-       /// An IPv4 address/port on which the peer is listening.
-       IPv4 {
+pub enum SocketAddress {
+       /// An IPv4 address and port on which the peer is listening.
+       TcpIpV4 {
                /// The 4-byte IPv4 address
                addr: [u8; 4],
                /// The port on which the node is listening
                port: u16,
        },
-       /// An IPv6 address/port on which the peer is listening.
-       IPv6 {
+       /// An IPv6 address and port on which the peer is listening.
+       TcpIpV6 {
                /// The 16-byte IPv6 address
                addr: [u8; 16],
                /// The port on which the node is listening
@@ -788,28 +791,28 @@ pub enum NetAddress {
                port: u16,
        },
 }
-impl NetAddress {
+impl SocketAddress {
        /// Gets the ID of this address type. Addresses in [`NodeAnnouncement`] messages should be sorted
        /// by this.
        pub(crate) fn get_id(&self) -> u8 {
                match self {
-                       &NetAddress::IPv4 {..} => { 1 },
-                       &NetAddress::IPv6 {..} => { 2 },
-                       &NetAddress::OnionV2(_) => { 3 },
-                       &NetAddress::OnionV3 {..} => { 4 },
-                       &NetAddress::Hostname {..} => { 5 },
+                       &SocketAddress::TcpIpV4 {..} => { 1 },
+                       &SocketAddress::TcpIpV6 {..} => { 2 },
+                       &SocketAddress::OnionV2(_) => { 3 },
+                       &SocketAddress::OnionV3 {..} => { 4 },
+                       &SocketAddress::Hostname {..} => { 5 },
                }
        }
 
        /// Strict byte-length of address descriptor, 1-byte type not recorded
        fn len(&self) -> u16 {
                match self {
-                       &NetAddress::IPv4 { .. } => { 6 },
-                       &NetAddress::IPv6 { .. } => { 18 },
-                       &NetAddress::OnionV2(_) => { 12 },
-                       &NetAddress::OnionV3 { .. } => { 37 },
+                       &SocketAddress::TcpIpV4 { .. } => { 6 },
+                       &SocketAddress::TcpIpV6 { .. } => { 18 },
+                       &SocketAddress::OnionV2(_) => { 12 },
+                       &SocketAddress::OnionV3 { .. } => { 37 },
                        // Consists of 1-byte hostname length, hostname bytes, and 2-byte port.
-                       &NetAddress::Hostname { ref hostname, .. } => { u16::from(hostname.len()) + 3 },
+                       &SocketAddress::Hostname { ref hostname, .. } => { u16::from(hostname.len()) + 3 },
                }
        }
 
@@ -819,31 +822,31 @@ impl NetAddress {
        pub(crate) const MAX_LEN: u16 = 258;
 }
 
-impl Writeable for NetAddress {
+impl Writeable for SocketAddress {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self {
-                       &NetAddress::IPv4 { ref addr, ref port } => {
+                       &SocketAddress::TcpIpV4 { ref addr, ref port } => {
                                1u8.write(writer)?;
                                addr.write(writer)?;
                                port.write(writer)?;
                        },
-                       &NetAddress::IPv6 { ref addr, ref port } => {
+                       &SocketAddress::TcpIpV6 { ref addr, ref port } => {
                                2u8.write(writer)?;
                                addr.write(writer)?;
                                port.write(writer)?;
                        },
-                       &NetAddress::OnionV2(bytes) => {
+                       &SocketAddress::OnionV2(bytes) => {
                                3u8.write(writer)?;
                                bytes.write(writer)?;
                        },
-                       &NetAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
+                       &SocketAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
                                4u8.write(writer)?;
                                ed25519_pubkey.write(writer)?;
                                checksum.write(writer)?;
                                version.write(writer)?;
                                port.write(writer)?;
                        },
-                       &NetAddress::Hostname { ref hostname, ref port } => {
+                       &SocketAddress::Hostname { ref hostname, ref port } => {
                                5u8.write(writer)?;
                                hostname.write(writer)?;
                                port.write(writer)?;
@@ -853,25 +856,25 @@ impl Writeable for NetAddress {
        }
 }
 
-impl Readable for Result<NetAddress, u8> {
-       fn read<R: Read>(reader: &mut R) -> Result<Result<NetAddress, u8>, DecodeError> {
+impl Readable for Result<SocketAddress, u8> {
+       fn read<R: Read>(reader: &mut R) -> Result<Result<SocketAddress, u8>, DecodeError> {
                let byte = <u8 as Readable>::read(reader)?;
                match byte {
                        1 => {
-                               Ok(Ok(NetAddress::IPv4 {
+                               Ok(Ok(SocketAddress::TcpIpV4 {
                                        addr: Readable::read(reader)?,
                                        port: Readable::read(reader)?,
                                }))
                        },
                        2 => {
-                               Ok(Ok(NetAddress::IPv6 {
+                               Ok(Ok(SocketAddress::TcpIpV6 {
                                        addr: Readable::read(reader)?,
                                        port: Readable::read(reader)?,
                                }))
                        },
-                       3 => Ok(Ok(NetAddress::OnionV2(Readable::read(reader)?))),
+                       3 => Ok(Ok(SocketAddress::OnionV2(Readable::read(reader)?))),
                        4 => {
-                               Ok(Ok(NetAddress::OnionV3 {
+                               Ok(Ok(SocketAddress::OnionV3 {
                                        ed25519_pubkey: Readable::read(reader)?,
                                        checksum: Readable::read(reader)?,
                                        version: Readable::read(reader)?,
@@ -879,7 +882,7 @@ impl Readable for Result<NetAddress, u8> {
                                }))
                        },
                        5 => {
-                               Ok(Ok(NetAddress::Hostname {
+                               Ok(Ok(SocketAddress::Hostname {
                                        hostname: Readable::read(reader)?,
                                        port: Readable::read(reader)?,
                                }))
@@ -889,8 +892,8 @@ impl Readable for Result<NetAddress, u8> {
        }
 }
 
-impl Readable for NetAddress {
-       fn read<R: Read>(reader: &mut R) -> Result<NetAddress, DecodeError> {
+impl Readable for SocketAddress {
+       fn read<R: Read>(reader: &mut R) -> Result<SocketAddress, DecodeError> {
                match Readable::read(reader) {
                        Ok(Ok(res)) => Ok(res),
                        Ok(Err(_)) => Err(DecodeError::UnknownVersion),
@@ -899,6 +902,104 @@ impl Readable for NetAddress {
        }
 }
 
+/// [`SocketAddress`] error variants
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub enum SocketAddressParseError {
+       /// Socket address (IPv4/IPv6) parsing error
+       SocketAddrParse,
+       /// Invalid input format
+       InvalidInput,
+       /// Invalid port
+       InvalidPort,
+       /// Invalid onion v3 address
+       InvalidOnionV3,
+}
+
+impl fmt::Display for SocketAddressParseError {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               match self {
+                       SocketAddressParseError::SocketAddrParse => write!(f, "Socket address (IPv4/IPv6) parsing error"),
+                       SocketAddressParseError::InvalidInput => write!(f, "Invalid input format. \
+                               Expected: \"<ipv4>:<port>\", \"[<ipv6>]:<port>\", \"<onion address>.onion:<port>\" or \"<hostname>:<port>\""),
+                       SocketAddressParseError::InvalidPort => write!(f, "Invalid port"),
+                       SocketAddressParseError::InvalidOnionV3 => write!(f, "Invalid onion v3 address"),
+               }
+       }
+}
+
+#[cfg(feature = "std")]
+impl From<std::net::SocketAddrV4> for SocketAddress {
+       fn from(addr: std::net::SocketAddrV4) -> Self {
+               SocketAddress::TcpIpV4 { addr: addr.ip().octets(), port: addr.port() }
+       }
+}
+
+#[cfg(feature = "std")]
+impl From<std::net::SocketAddrV6> for SocketAddress {
+       fn from(addr: std::net::SocketAddrV6) -> Self {
+               SocketAddress::TcpIpV6 { addr: addr.ip().octets(), port: addr.port() }
+       }
+}
+
+#[cfg(feature = "std")]
+impl From<std::net::SocketAddr> for SocketAddress {
+       fn from(addr: std::net::SocketAddr) -> Self {
+               match addr {
+                       std::net::SocketAddr::V4(addr) => addr.into(),
+                       std::net::SocketAddr::V6(addr) => addr.into(),
+               }
+       }
+}
+
+fn parse_onion_address(host: &str, port: u16) -> Result<SocketAddress, SocketAddressParseError> {
+       if host.ends_with(".onion") {
+               let domain = &host[..host.len() - ".onion".len()];
+               if domain.len() != 56 {
+                       return Err(SocketAddressParseError::InvalidOnionV3);
+               }
+               let onion = base32::Alphabet::RFC4648 { padding: false }.decode(&domain).map_err(|_| SocketAddressParseError::InvalidOnionV3)?;
+               if onion.len() != 35 {
+                       return Err(SocketAddressParseError::InvalidOnionV3);
+               }
+               let version = onion[0];
+               let first_checksum_flag = onion[1];
+               let second_checksum_flag = onion[2];
+               let mut ed25519_pubkey = [0; 32];
+               ed25519_pubkey.copy_from_slice(&onion[3..35]);
+               let checksum = u16::from_be_bytes([first_checksum_flag, second_checksum_flag]);
+               return Ok(SocketAddress::OnionV3 { ed25519_pubkey, checksum, version, port });
+       } else {
+               return Err(SocketAddressParseError::InvalidInput);
+       }
+}
+
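The two length checks above work out as follows: a v3 onion host is 56 base32 characters, each carrying 5 bits, so the decoded payload is 56 * 5 / 8 = 35 bytes, which this decoder splits into a 1-byte version, a 2-byte checksum, and the 32-byte ed25519 public key. A trivial check of the arithmetic:

    // Worked length arithmetic for the checks in parse_onion_address above.
    fn main() {
        assert_eq!(56 * 5 / 8, 35); // 56 base32 chars decode to 35 bytes
        assert_eq!(1 + 2 + 32, 35); // version + checksum + pubkey
    }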
+#[cfg(feature = "std")]
+impl FromStr for SocketAddress {
+       type Err = SocketAddressParseError;
+
+       fn from_str(s: &str) -> Result<Self, Self::Err> {
+               match std::net::SocketAddr::from_str(s) {
+                       Ok(addr) => Ok(addr.into()),
+                       Err(_) => {
+                               let trimmed_input = match s.rfind(":") {
+                                       Some(pos) => pos,
+                                       None => return Err(SocketAddressParseError::InvalidInput),
+                               };
+                               let host = &s[..trimmed_input];
+                               let port: u16 = s[trimmed_input + 1..].parse().map_err(|_| SocketAddressParseError::InvalidPort)?;
+                               if host.ends_with(".onion") {
+                                       return parse_onion_address(host, port);
+                               };
+                               if let Ok(hostname) = Hostname::try_from(s[..trimmed_input].to_string()) {
+                                       return Ok(SocketAddress::Hostname { hostname, port });
+                               };
+                               return Err(SocketAddressParseError::SocketAddrParse)
+                       },
+               }
+       }
+}
+
 /// Represents the set of gossip messages that require a signature from a node's identity key.
 pub enum UnsignedGossipMessage<'a> {
        /// An unsigned channel announcement.
@@ -938,7 +1039,7 @@ pub struct UnsignedNodeAnnouncement {
        /// This should be sanitized before use. There is no guarantee of uniqueness.
        pub alias: NodeAlias,
        /// List of addresses on which this node is reachable
-       pub addresses: Vec<NetAddress>,
+       pub addresses: Vec<SocketAddress>,
        pub(crate) excess_address_data: Vec<u8>,
        pub(crate) excess_data: Vec<u8>,
 }
@@ -1229,7 +1330,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
        /// Handle an incoming `channel_ready` message from the given peer.
        fn handle_channel_ready(&self, their_node_id: &PublicKey, msg: &ChannelReady);
 
-       // Channl close:
+       // Channel close:
        /// Handle an incoming `shutdown` message from the given peer.
        fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown);
        /// Handle an incoming `closing_signed` message from the given peer.
@@ -1773,7 +1874,7 @@ impl Readable for Init {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let global_features: InitFeatures = Readable::read(r)?;
                let features: InitFeatures = Readable::read(r)?;
-               let mut remote_network_address: Option<NetAddress> = None;
+               let mut remote_network_address: Option<SocketAddress> = None;
                let mut networks: Option<WithoutLength<Vec<ChainHash>>> = None;
                decode_tlv_stream!(r, {
                        (1, networks, option),
@@ -2272,7 +2373,7 @@ impl Readable for UnsignedNodeAnnouncement {
                let alias: NodeAlias = Readable::read(r)?;
 
                let addr_len: u16 = Readable::read(r)?;
-               let mut addresses: Vec<NetAddress> = Vec::new();
+               let mut addresses: Vec<SocketAddress> = Vec::new();
                let mut addr_readpos = 0;
                let mut excess = false;
                let mut excess_byte = 0;
@@ -2471,6 +2572,7 @@ impl_writeable_msg!(GossipTimestampFilter, {
 
 #[cfg(test)]
 mod tests {
+       use std::convert::TryFrom;
        use bitcoin::blockdata::constants::ChainHash;
        use bitcoin::{Transaction, PackedLockTime, TxIn, Script, Sequence, Witness, TxOut};
        use hex;
@@ -2478,6 +2580,7 @@ mod tests {
        use crate::ln::ChannelId;
        use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
        use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket};
+       use crate::ln::msgs::SocketAddress;
        use crate::routing::gossip::{NodeAlias, NodeId};
        use crate::util::ser::{Writeable, Readable, Hostname, TransactionU16LenLimited};
 
@@ -2493,11 +2596,13 @@ mod tests {
 
        use crate::io::{self, Cursor};
        use crate::prelude::*;
-       use core::convert::TryFrom;
        use core::str::FromStr;
-
        use crate::chain::transaction::OutPoint;
 
+       #[cfg(feature = "std")]
+       use std::net::{Ipv4Addr, Ipv6Addr};
+       use crate::ln::msgs::SocketAddressParseError;
+
        #[test]
        fn encoding_channel_reestablish() {
                let public_key = {
@@ -2663,24 +2768,24 @@ mod tests {
                };
                let mut addresses = Vec::new();
                if ipv4 {
-                       addresses.push(msgs::NetAddress::IPv4 {
+                       addresses.push(SocketAddress::TcpIpV4 {
                                addr: [255, 254, 253, 252],
                                port: 9735
                        });
                }
                if ipv6 {
-                       addresses.push(msgs::NetAddress::IPv6 {
+                       addresses.push(SocketAddress::TcpIpV6 {
                                addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
                                port: 9735
                        });
                }
                if onionv2 {
-                       addresses.push(msgs::NetAddress::OnionV2(
+                       addresses.push(msgs::SocketAddress::OnionV2(
                                [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]
                        ));
                }
                if onionv3 {
-                       addresses.push(msgs::NetAddress::OnionV3 {
+                       addresses.push(msgs::SocketAddress::OnionV3 {
                                ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224],
                                checksum: 32,
                                version: 16,
@@ -2688,7 +2793,7 @@ mod tests {
                        });
                }
                if hostname {
-                       addresses.push(msgs::NetAddress::Hostname {
+                       addresses.push(SocketAddress::Hostname {
                                hostname: Hostname::try_from(String::from("host")).unwrap(),
                                port: 9735,
                        });
@@ -3296,10 +3401,10 @@ mod tests {
                let shutdown = msgs::Shutdown {
                        channel_id: ChannelId::from_bytes([2; 32]),
                        scriptpubkey:
-                                    if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() }
+                               if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() }
                                else if script_type == 2 { Address::p2sh(&script, Network::Testnet).unwrap().script_pubkey() }
                                else if script_type == 3 { Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).unwrap().script_pubkey() }
-                               else                     { Address::p2wsh(&script, Network::Testnet).script_pubkey() },
+                               else { Address::p2wsh(&script, Network::Testnet).script_pubkey() },
                };
                let encoded_value = shutdown.encode();
                let mut target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap();
@@ -3504,7 +3609,7 @@ mod tests {
                }.encode(), hex::decode("00000000014001010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap());
                let init_msg = msgs::Init { features: InitFeatures::from_le_bytes(vec![]),
                        networks: Some(vec![mainnet_hash]),
-                       remote_network_address: Some(msgs::NetAddress::IPv4 {
+                       remote_network_address: Some(SocketAddress::TcpIpV4 {
                                addr: [127, 0, 0, 1],
                                port: 1000,
                        }),
@@ -3869,4 +3974,47 @@ mod tests {
                }
                Ok(encoded_payload)
        }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_socket_address_from_str() {
+               assert_eq!(SocketAddress::TcpIpV4 {
+                       addr: Ipv4Addr::new(127, 0, 0, 1).octets(),
+                       port: 1234,
+               }, SocketAddress::from_str("127.0.0.1:1234").unwrap());
+
+               assert_eq!(SocketAddress::TcpIpV6 {
+                       addr: Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).octets(),
+                       port: 1234,
+               }, SocketAddress::from_str("[0:0:0:0:0:0:0:1]:1234").unwrap());
+               assert_eq!(
+                       SocketAddress::Hostname {
+                               hostname: Hostname::try_from("lightning-node.mydomain.com".to_string()).unwrap(),
+                               port: 1234,
+                       }, SocketAddress::from_str("lightning-node.mydomain.com:1234").unwrap());
+               assert_eq!(
+                       SocketAddress::Hostname {
+                               hostname: Hostname::try_from("example.com".to_string()).unwrap(),
+                               port: 1234,
+                       }, SocketAddress::from_str("example.com:1234").unwrap());
+               assert_eq!(SocketAddress::OnionV3 {
+                       ed25519_pubkey: [37, 24, 75, 5, 25, 73, 117, 194, 139, 102, 182, 107, 4, 105, 247, 246, 85,
+                       111, 177, 172, 49, 137, 167, 155, 64, 221, 163, 47, 31, 33, 71, 3],
+                       checksum: 48326,
+                       version: 121,
+                       port: 1234
+               }, SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:1234").unwrap());
+               assert_eq!(Err(SocketAddressParseError::InvalidOnionV3), SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6.onion:1234"));
+               assert_eq!(Err(SocketAddressParseError::InvalidInput), SocketAddress::from_str("127.0.0.1@1234"));
+               assert_eq!(Err(SocketAddressParseError::InvalidInput), "".parse::<SocketAddress>());
+               assert!(SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:9735:94").is_err());
+               assert!(SocketAddress::from_str("wrong$%#.com:1234").is_err());
+               assert_eq!(Err(SocketAddressParseError::InvalidPort), SocketAddress::from_str("example.com:wrong"));
+               assert!("localhost".parse::<SocketAddress>().is_err());
+               assert!("localhost:invalid-port".parse::<SocketAddress>().is_err());
+               assert!("invalid-onion-v3-hostname.onion:8080".parse::<SocketAddress>().is_err());
+               assert!("b32.example.onion:invalid-port".parse::<SocketAddress>().is_err());
+               assert!("invalid-address".parse::<SocketAddress>().is_err());
+               assert!(SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:1234").is_err());
+       }
 }
index d326c1aab74441f1203f532ea48a333b85f8c375..3731c31c5b42c372663f29e31d30b6f6207ea811 100644 (file)
@@ -19,7 +19,7 @@ use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields};
 use crate::ln::onion_utils;
 use crate::routing::gossip::{NetworkUpdate, RoutingFees};
-use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop};
+use crate::routing::router::{get_route, PaymentParameters, Route, RouteParameters, RouteHint, RouteHintHop};
 use crate::ln::features::{InitFeatures, Bolt11InvoiceFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
@@ -516,7 +516,7 @@ fn test_onion_failure() {
        let short_channel_id = channels[1].0.contents.short_channel_id;
        let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
                .unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
-               .context.get_counterparty_htlc_minimum_msat() - 1;
+               .context().get_counterparty_htlc_minimum_msat() - 1;
        let mut bogus_route = route.clone();
        let route_len = bogus_route.paths[0].hops.len();
        bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;
@@ -1048,10 +1048,11 @@ macro_rules! get_phantom_route {
                ])]).unwrap();
                let scorer = test_utils::TestScorer::new();
                let network_graph = $nodes[0].network_graph.read_only();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, $amt);
                (get_route(
-                       &$nodes[0].node.get_our_node_id(), &payment_params, &network_graph,
+                       &$nodes[0].node.get_our_node_id(), &route_params, &network_graph,
                        Some(&$nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-                       $amt, $nodes[0].logger, &scorer, &(), &[0u8; 32]
+                       $nodes[0].logger, &scorer, &(), &[0u8; 32]
                ).unwrap(), phantom_route_hint.phantom_scid)
        }
 }}
index d55077770f3baae35e2dab9c2f95fd2136071a50..8fdbdefef6592766b52b9ad0cf142e55ad045990 100644 (file)
@@ -1007,7 +1007,7 @@ mod tests {
                                                short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // We fill in the payloads manually instead of generating them from RouteHops.
                                        },
                        ], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
 
                let onion_keys = super::construct_onion_keys(&secp_ctx, &route.paths[0], &get_test_session_key()).unwrap();
index f60bf565efa35ddadac28416f2a7da22233d4892..0cc9e7e0531fb3bb6d1bc0d7dd38bff274b8333e 100644 (file)
@@ -16,8 +16,9 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey};
 use crate::sign::{EntropySource, NodeSigner, Recipient};
 use crate::events::{self, PaymentFailureReason};
 use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, IDEMPOTENCY_TIMEOUT_TICKS, PaymentId};
+use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, PaymentId};
 use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason};
+use crate::offers::invoice::Bolt12Invoice;
 use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
 use crate::util::errors::APIError;
 use crate::util::logger::Logger;
@@ -32,12 +33,32 @@ use core::ops::Deref;
 use crate::prelude::*;
 use crate::sync::Mutex;
 
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the idempotency
+/// of payments by [`PaymentId`]. See [`OutboundPayments::remove_stale_payments`].
+///
+/// [`ChannelManager::timer_tick_occurred`]: crate::ln::channelmanager::ChannelManager::timer_tick_occurred
+pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until an invoice request without
+/// a response is timed out.
+///
+/// [`ChannelManager::timer_tick_occurred`]: crate::ln::channelmanager::ChannelManager::timer_tick_occurred
+const INVOICE_REQUEST_TIMEOUT_TICKS: u8 = 3;
+
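Both constants feed the same pattern: a per-payment tick counter incremented on each `timer_tick_occurred` and compared against a limit. A minimal sketch with stand-in names (the real counter lives on the `AwaitingInvoice` variant and is driven from `OutboundPayments`):

    // Stand-in type for the sketch; the real variant carries more fields.
    struct AwaitingInvoice {
        timer_ticks_without_response: u8,
    }

    /// Returns true once the request should be abandoned, assuming the caller
    /// invokes this once per timer tick.
    fn tick_and_check(pending: &mut AwaitingInvoice, timeout_ticks: u8) -> bool {
        pending.timer_ticks_without_response += 1;
        pending.timer_ticks_without_response >= timeout_ticks
    }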
 /// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
 /// and later, also stores information for retrying the payment.
 pub(crate) enum PendingOutboundPayment {
        Legacy {
                session_privs: HashSet<[u8; 32]>,
        },
+       AwaitingInvoice {
+               timer_ticks_without_response: u8,
+               retry_strategy: Retry,
+       },
+       InvoiceReceived {
+               payment_hash: PaymentHash,
+               retry_strategy: Retry,
+       },
        Retryable {
                retry_strategy: Option<Retry>,
                attempts: PaymentAttempts,
@@ -108,6 +129,12 @@ impl PendingOutboundPayment {
                        params.previously_failed_channels.push(scid);
                }
        }
+       fn is_awaiting_invoice(&self) -> bool {
+               match self {
+                       PendingOutboundPayment::AwaitingInvoice { .. } => true,
+                       _ => false,
+               }
+       }
        pub(super) fn is_fulfilled(&self) -> bool {
                match self {
                        PendingOutboundPayment::Fulfilled { .. } => true,
@@ -130,6 +157,8 @@ impl PendingOutboundPayment {
        fn payment_hash(&self) -> Option<PaymentHash> {
                match self {
                        PendingOutboundPayment::Legacy { .. } => None,
+                       PendingOutboundPayment::AwaitingInvoice { .. } => None,
+                       PendingOutboundPayment::InvoiceReceived { payment_hash, .. } => Some(*payment_hash),
                        PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
                        PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
                        PendingOutboundPayment::Abandoned { payment_hash, .. } => Some(*payment_hash),
@@ -142,8 +171,9 @@ impl PendingOutboundPayment {
                        PendingOutboundPayment::Legacy { session_privs } |
                                PendingOutboundPayment::Retryable { session_privs, .. } |
                                PendingOutboundPayment::Fulfilled { session_privs, .. } |
-                               PendingOutboundPayment::Abandoned { session_privs, .. }
-                       => session_privs,
+                               PendingOutboundPayment::Abandoned { session_privs, .. } => session_privs,
+                       PendingOutboundPayment::AwaitingInvoice { .. } |
+                               PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); return; },
                });
                let payment_hash = self.payment_hash();
                *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 };
@@ -158,6 +188,12 @@ impl PendingOutboundPayment {
                                payment_hash: *payment_hash,
                                reason: Some(reason)
                        };
+               } else if let PendingOutboundPayment::InvoiceReceived { payment_hash, .. } = self {
+                       *self = PendingOutboundPayment::Abandoned {
+                               session_privs: HashSet::new(),
+                               payment_hash: *payment_hash,
+                               reason: Some(reason)
+                       };
                }
        }
 
@@ -169,7 +205,9 @@ impl PendingOutboundPayment {
                                PendingOutboundPayment::Fulfilled { session_privs, .. } |
                                PendingOutboundPayment::Abandoned { session_privs, .. } => {
                                        session_privs.remove(session_priv)
-                               }
+                               },
+                       PendingOutboundPayment::AwaitingInvoice { .. } |
+                               PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
                };
                if remove_res {
                        if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
@@ -188,7 +226,9 @@ impl PendingOutboundPayment {
                        PendingOutboundPayment::Legacy { session_privs } |
                                PendingOutboundPayment::Retryable { session_privs, .. } => {
                                        session_privs.insert(session_priv)
-                               }
+                               },
+                       PendingOutboundPayment::AwaitingInvoice { .. } |
+                               PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
                        PendingOutboundPayment::Fulfilled { .. } => false,
                        PendingOutboundPayment::Abandoned { .. } => false,
                };
@@ -210,7 +250,9 @@ impl PendingOutboundPayment {
                                PendingOutboundPayment::Fulfilled { session_privs, .. } |
                                PendingOutboundPayment::Abandoned { session_privs, .. } => {
                                        session_privs.len()
-                               }
+                               },
+                       PendingOutboundPayment::AwaitingInvoice { .. } => 0,
+                       PendingOutboundPayment::InvoiceReceived { .. } => 0,
                }
        }
 }
@@ -223,7 +265,7 @@ pub enum Retry {
        /// Each attempt may be multiple HTLCs along multiple paths if the router decides to split up a
        /// retry, and may retry multiple failed HTLCs at once if they failed around the same time and
        /// were retried along a route from a single call to [`Router::find_route_with_id`].
-       Attempts(usize),
+       Attempts(u32),
        #[cfg(not(feature = "no-std"))]
        /// Time elapsed before abandoning retries for a payment. At least one attempt at payment is made;
        /// see [`PaymentParameters::expiry_time`] to avoid any attempt at payment after a specific time.
@@ -232,6 +274,19 @@ pub enum Retry {
        Timeout(core::time::Duration),
 }
 
+#[cfg(feature = "no-std")]
+impl_writeable_tlv_based_enum!(Retry,
+       ;
+       (0, Attempts)
+);
+
+#[cfg(not(feature = "no-std"))]
+impl_writeable_tlv_based_enum!(Retry,
+       ;
+       (0, Attempts),
+       (2, Timeout)
+);
+
 impl Retry {
        pub(crate) fn is_retryable_now(&self, attempts: &PaymentAttempts) -> bool {
                match (self, attempts) {
@@ -265,7 +320,7 @@ pub(crate) type PaymentAttempts = PaymentAttemptsUsingTime<ConfiguredTime>;
 pub(crate) struct PaymentAttemptsUsingTime<T: Time> {
        /// This count will be incremented only after the result of the attempt is known. When it's 0,
        /// it means the result of the first attempt is not known yet.
-       pub(crate) count: usize,
+       pub(crate) count: u32,
        /// This field is only used when retry is `Retry::Timeout`, which is only built with the `std` feature
        #[cfg(not(feature = "no-std"))]
        first_attempted_at: T,
@@ -401,6 +456,15 @@ pub enum PaymentSendFailure {
        },
 }
 
+/// An error when attempting to pay a BOLT 12 invoice.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(super) enum Bolt12PaymentError {
+       /// The invoice was not requested.
+       UnexpectedInvoice,
+       /// Payment for an invoice with the corresponding [`PaymentId`] was already initiated.
+       DuplicateInvoice,
+}
+
 /// Information which is provided, encrypted, to the payment recipient when sending HTLCs.
 ///
 /// This should generally be constructed with data communicated to us from the recipient (via a
@@ -638,6 +702,50 @@ impl OutboundPayments {
                }
        }
 
+       #[allow(unused)]
+       pub(super) fn send_payment_for_bolt12_invoice<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
+               &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R,
+               first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
+               best_block_height: u32, logger: &L,
+               pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+               send_payment_along_path: SP,
+       ) -> Result<(), Bolt12PaymentError>
+       where
+               R::Target: Router,
+               ES::Target: EntropySource,
+               NS::Target: NodeSigner,
+               L::Target: Logger,
+               IH: Fn() -> InFlightHtlcs,
+               SP: Fn(SendAlongPathArgs) -> Result<(), APIError>,
+       {
+               let payment_hash = invoice.payment_hash();
+               match self.pending_outbound_payments.lock().unwrap().entry(payment_id) {
+                       hash_map::Entry::Occupied(entry) => match entry.get() {
+                               PendingOutboundPayment::AwaitingInvoice { retry_strategy, .. } => {
+                                       *entry.into_mut() = PendingOutboundPayment::InvoiceReceived {
+                                               payment_hash,
+                                               retry_strategy: *retry_strategy,
+                                       };
+                               },
+                               _ => return Err(Bolt12PaymentError::DuplicateInvoice),
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice),
+               };
+
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                       final_value_msat: invoice.amount_msats(),
+               };
+
+               self.find_route_and_send_payment(
+                       payment_hash, payment_id, route_params, router, first_hops, &inflight_htlcs,
+                       entropy_source, node_signer, best_block_height, logger, pending_events,
+                       &send_payment_along_path
+               );
+
+               Ok(())
+       }
+
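The entry handling above amounts to a small state machine: only a payment still in `AwaitingInvoice` accepts an invoice and moves to `InvoiceReceived` (after which routing is attempted); a known payment in any other state yields `DuplicateInvoice`, and an unknown `PaymentId` yields `UnexpectedInvoice`. A reduced sketch with stand-in types, not the crate's actual definitions:

    // Simplified states; the real variants carry retry strategies, hashes,
    // and per-HTLC session data.
    enum OutboundState {
        AwaitingInvoice,
        InvoiceReceived,
        Retryable,
    }

    enum Bolt12PaymentError {
        UnexpectedInvoice,
        DuplicateInvoice,
    }

    fn on_invoice(state: Option<OutboundState>) -> Result<OutboundState, Bolt12PaymentError> {
        match state {
            // Only a payment still waiting on an invoice may proceed.
            Some(OutboundState::AwaitingInvoice) => Ok(OutboundState::InvoiceReceived),
            // Any other known state means an invoice was already handled.
            Some(_) => Err(Bolt12PaymentError::DuplicateInvoice),
            // An invoice for an unknown payment id was never requested.
            None => Err(Bolt12PaymentError::UnexpectedInvoice),
        }
    }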
        pub(super) fn check_retry_payments<R: Deref, ES: Deref, NS: Deref, SP, IH, FH, L: Deref>(
                &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
                best_block_height: u32,
@@ -672,14 +780,14 @@ impl OutboundPayments {
                        }
                        core::mem::drop(outbounds);
                        if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params {
-                               self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
+                               self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
                        } else { break }
                }
 
                let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                outbounds.retain(|pmt_id, pmt| {
                        let mut retain = true;
-                       if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 {
+                       if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_awaiting_invoice() {
                                pmt.mark_abandoned(PaymentFailureReason::RetriesExhausted);
                                if let PendingOutboundPayment::Abandoned { payment_hash, reason, .. } = pmt {
                                        pending_events.lock().unwrap().push_back((events::Event::PaymentFailed {
@@ -697,7 +805,8 @@ impl OutboundPayments {
        pub(super) fn needs_abandon(&self) -> bool {
                let outbounds = self.pending_outbound_payments.lock().unwrap();
                outbounds.iter().any(|(_, pmt)|
-                       !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled())
+                       !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled() &&
+                       !pmt.is_awaiting_invoice())
        }
 
        /// Errors immediately on [`RetryableSendFailure`] error conditions. Otherwise, further errors may
@@ -757,7 +866,7 @@ impl OutboundPayments {
                Ok(())
        }
 
-       fn retry_payment_internal<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
+       fn find_route_and_send_payment<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
                &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters,
                router: &R, first_hops: Vec<ChannelDetails>, inflight_htlcs: &IH, entropy_source: &ES,
                node_signer: &NS, best_block_height: u32, logger: &L,
@@ -799,12 +908,6 @@ impl OutboundPayments {
                        }
                }
 
-               const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
-               let mut onion_session_privs = Vec::with_capacity(route.paths.len());
-               for _ in 0..route.paths.len() {
-                       onion_session_privs.push(entropy_source.get_secure_random_bytes());
-               }
-
                macro_rules! abandon_with_entry {
                        ($payment: expr, $reason: expr) => {
                                $payment.get_mut().mark_abandoned($reason);
@@ -820,31 +923,74 @@ impl OutboundPayments {
                                }
                        }
                }
-               let (total_msat, recipient_onion, keysend_preimage) = {
+               let (total_msat, recipient_onion, keysend_preimage, onion_session_privs) = {
                        let mut outbounds = self.pending_outbound_payments.lock().unwrap();
                        match outbounds.entry(payment_id) {
                                hash_map::Entry::Occupied(mut payment) => {
-                                       let res = match payment.get() {
+                                       match payment.get() {
                                                PendingOutboundPayment::Retryable {
                                                        total_msat, keysend_preimage, payment_secret, payment_metadata,
                                                        custom_tlvs, pending_amt_msat, ..
                                                } => {
+                                                       const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
                                                        let retry_amt_msat = route.get_total_amount();
                                                        if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
                                                                log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat);
                                                                abandon_with_entry!(payment, PaymentFailureReason::UnexpectedError);
                                                                return
                                                        }
-                                                       (*total_msat, RecipientOnionFields {
-                                                                       payment_secret: *payment_secret,
-                                                                       payment_metadata: payment_metadata.clone(),
-                                                                       custom_tlvs: custom_tlvs.clone(),
-                                                               }, *keysend_preimage)
+
+                                                       if !payment.get().is_retryable_now() {
+                                                               log_error!(logger, "Retries exhausted for payment id {}", &payment_id);
+                                                               abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted);
+                                                               return
+                                                       }
+
+                                                       let total_msat = *total_msat;
+                                                       let recipient_onion = RecipientOnionFields {
+                                                               payment_secret: *payment_secret,
+                                                               payment_metadata: payment_metadata.clone(),
+                                                               custom_tlvs: custom_tlvs.clone(),
+                                                       };
+                                                       let keysend_preimage = *keysend_preimage;
+
+                                                       let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+                                                       for _ in 0..route.paths.len() {
+                                                               onion_session_privs.push(entropy_source.get_secure_random_bytes());
+                                                       }
+
+                                                       for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+                                                               assert!(payment.get_mut().insert(*session_priv_bytes, path));
+                                                       }
+
+                                                       payment.get_mut().increment_attempts();
+
+                                                       (total_msat, recipient_onion, keysend_preimage, onion_session_privs)
                                                },
                                                PendingOutboundPayment::Legacy { .. } => {
                                                        log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102");
                                                        return
                                                },
+                                               PendingOutboundPayment::AwaitingInvoice { .. } => {
+                                                       log_error!(logger, "Payment not yet sent");
+                                                       return
+                                               },
+                                               PendingOutboundPayment::InvoiceReceived { payment_hash, retry_strategy } => {
+                                                       let total_amount = route_params.final_value_msat;
+                                                       let recipient_onion = RecipientOnionFields {
+                                                               payment_secret: None,
+                                                               payment_metadata: None,
+                                                               custom_tlvs: vec![],
+                                                       };
+                                                       let retry_strategy = Some(*retry_strategy);
+                                                       let payment_params = Some(route_params.payment_params.clone());
+                                                       let (retryable_payment, onion_session_privs) = self.create_pending_payment(
+                                                               *payment_hash, recipient_onion.clone(), None, &route,
+                                                               retry_strategy, payment_params, entropy_source, best_block_height
+                                                       );
+                                                       *payment.into_mut() = retryable_payment;
+                                                       (total_amount, recipient_onion, None, onion_session_privs)
+                                               },
                                                PendingOutboundPayment::Fulfilled { .. } => {
                                                        log_error!(logger, "Payment already completed");
                                                        return
@@ -853,17 +999,7 @@ impl OutboundPayments {
                                                        log_error!(logger, "Payment already abandoned (with some HTLCs still pending)");
                                                        return
                                                },
-                                       };
-                                       if !payment.get().is_retryable_now() {
-                                               log_error!(logger, "Retries exhausted for payment id {}", &payment_id);
-                                               abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted);
-                                               return
                                        }
-                                       payment.get_mut().increment_attempts();
-                                       for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
-                                               assert!(payment.get_mut().insert(*session_priv_bytes, path));
-                                       }
-                                       res
                                },
                                hash_map::Entry::Vacant(_) => {
                                        log_error!(logger, "Payment with ID {} not found", &payment_id);
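
A rough, self-contained sketch of the state transition in the hunk above: a retry that finds an
`InvoiceReceived` entry promotes it in place to a retryable payment before any HTLCs go out. The
names below (`PaymentId`, `PendingPayment`, `begin_retry`) are simplified stand-ins for
illustration, not the LDK definitions.

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct PaymentId([u8; 32]);

    #[allow(dead_code)]
    enum PendingPayment {
        AwaitingInvoice,
        InvoiceReceived { payment_hash: [u8; 32] },
        Retryable { payment_hash: [u8; 32], attempts: u32 },
    }

    // Overwrite the map entry in place (mirroring `*payment.into_mut() = retryable_payment`
    // above) and hand back the payment hash so the caller can send HTLCs for it.
    fn begin_retry(pending: &mut HashMap<PaymentId, PendingPayment>, id: PaymentId) -> Option<[u8; 32]> {
        let payment = pending.get_mut(&id)?;
        if let PendingPayment::InvoiceReceived { payment_hash } = payment {
            let hash = *payment_hash;
            *payment = PendingPayment::Retryable { payment_hash: hash, attempts: 0 };
            Some(hash)
        } else {
            None
        }
    }

    fn main() {
        let mut pending = HashMap::new();
        let id = PaymentId([0; 32]);
        pending.insert(id, PendingPayment::InvoiceReceived { payment_hash: [1; 32] });
        assert_eq!(begin_retry(&mut pending, id), Some([1; 32]));
        assert!(matches!(pending.get(&id), Some(PendingPayment::Retryable { .. })));
    }

Keeping the entry rather than removing and re-inserting it means the payment id stays reserved
while the retry proceeds, so a duplicate send for the same id still hits the occupied-entry check.
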
@@ -897,14 +1033,14 @@ impl OutboundPayments {
                match err {
                        PaymentSendFailure::AllFailedResendSafe(errs) => {
                                Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), logger, pending_events);
-                               self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+                               self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
                        },
                        PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => {
                                Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), logger, pending_events);
                               // Some paths were sent. Even if we failed to send the full MPP value, our recipient may
                               // misbehave and claim the funds, at which point we have to consider the payment sent, so we
                               // return `Ok()` here, ignoring any retry errors.
-                               self.retry_payment_internal(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+                               self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
                        },
                        PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. } => {
                                // This may happen if we send a payment and some paths fail, but only due to a temporary
@@ -976,7 +1112,7 @@ impl OutboundPayments {
                        }))
                }
 
-               let route = Route { paths: vec![path], payment_params: None };
+               let route = Route { paths: vec![path], route_params: None };
                let onion_session_privs = self.add_new_pending_payment(payment_hash,
                        RecipientOnionFields::spontaneous_empty(), payment_id, None, &route, None, None,
                        entropy_source, best_block_height)?;
@@ -1017,36 +1153,70 @@ impl OutboundPayments {
                keysend_preimage: Option<PaymentPreimage>, route: &Route, retry_strategy: Option<Retry>,
                payment_params: Option<PaymentParameters>, entropy_source: &ES, best_block_height: u32
        ) -> Result<Vec<[u8; 32]>, PaymentSendFailure> where ES::Target: EntropySource {
+               let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+               match pending_outbounds.entry(payment_id) {
+                       hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+                       hash_map::Entry::Vacant(entry) => {
+                               let (payment, onion_session_privs) = self.create_pending_payment(
+                                       payment_hash, recipient_onion, keysend_preimage, route, retry_strategy,
+                                       payment_params, entropy_source, best_block_height
+                               );
+                               entry.insert(payment);
+                               Ok(onion_session_privs)
+                       },
+               }
+       }
+
+       fn create_pending_payment<ES: Deref>(
+               &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+               keysend_preimage: Option<PaymentPreimage>, route: &Route, retry_strategy: Option<Retry>,
+               payment_params: Option<PaymentParameters>, entropy_source: &ES, best_block_height: u32
+       ) -> (PendingOutboundPayment, Vec<[u8; 32]>)
+       where
+               ES::Target: EntropySource,
+       {
                let mut onion_session_privs = Vec::with_capacity(route.paths.len());
                for _ in 0..route.paths.len() {
                        onion_session_privs.push(entropy_source.get_secure_random_bytes());
                }
 
+               let mut payment = PendingOutboundPayment::Retryable {
+                       retry_strategy,
+                       attempts: PaymentAttempts::new(),
+                       payment_params,
+                       session_privs: HashSet::new(),
+                       pending_amt_msat: 0,
+                       pending_fee_msat: Some(0),
+                       payment_hash,
+                       payment_secret: recipient_onion.payment_secret,
+                       payment_metadata: recipient_onion.payment_metadata,
+                       keysend_preimage,
+                       custom_tlvs: recipient_onion.custom_tlvs,
+                       starting_block_height: best_block_height,
+                       total_msat: route.get_total_amount(),
+               };
+
+               for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+                       assert!(payment.insert(*session_priv_bytes, path));
+               }
+
+               (payment, onion_session_privs)
+       }
+
+       #[allow(unused)]
+       pub(super) fn add_new_awaiting_invoice(
+               &self, payment_id: PaymentId, retry_strategy: Retry
+       ) -> Result<(), ()> {
                let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
                match pending_outbounds.entry(payment_id) {
-                       hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+                       hash_map::Entry::Occupied(_) => Err(()),
                        hash_map::Entry::Vacant(entry) => {
-                               let payment = entry.insert(PendingOutboundPayment::Retryable {
+                               entry.insert(PendingOutboundPayment::AwaitingInvoice {
+                                       timer_ticks_without_response: 0,
                                        retry_strategy,
-                                       attempts: PaymentAttempts::new(),
-                                       payment_params,
-                                       session_privs: HashSet::new(),
-                                       pending_amt_msat: 0,
-                                       pending_fee_msat: Some(0),
-                                       payment_hash,
-                                       payment_secret: recipient_onion.payment_secret,
-                                       payment_metadata: recipient_onion.payment_metadata,
-                                       keysend_preimage,
-                                       custom_tlvs: recipient_onion.custom_tlvs,
-                                       starting_block_height: best_block_height,
-                                       total_msat: route.get_total_amount(),
                                });
 
-                               for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
-                                       assert!(payment.insert(*session_priv_bytes, path));
-                               }
-
-                               Ok(onion_session_privs)
+                               Ok(())
                        },
                }
        }
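
The occupied/vacant split above is what makes `add_new_awaiting_invoice` idempotent per
`PaymentId`. A minimal sketch of the same entry-API pattern, using simplified stand-in types
rather than the LDK ones:

    use std::collections::{hash_map, HashMap};

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct PaymentId([u8; 32]);

    #[allow(dead_code)]
    enum PendingPayment {
        AwaitingInvoice { timer_ticks_without_response: u8 },
    }

    // A second call with the same id finds the entry occupied and fails, so a payment id can
    // only ever be associated with one outstanding invoice request.
    fn add_new_awaiting_invoice(
        pending: &mut HashMap<PaymentId, PendingPayment>, id: PaymentId,
    ) -> Result<(), ()> {
        match pending.entry(id) {
            hash_map::Entry::Occupied(_) => Err(()),
            hash_map::Entry::Vacant(entry) => {
                entry.insert(PendingPayment::AwaitingInvoice { timer_ticks_without_response: 0 });
                Ok(())
            },
        }
    }

    fn main() {
        let mut pending = HashMap::new();
        assert!(add_new_awaiting_invoice(&mut pending, PaymentId([0; 32])).is_ok());
        assert!(add_new_awaiting_invoice(&mut pending, PaymentId([0; 32])).is_err());
    }
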
@@ -1145,9 +1315,9 @@ impl OutboundPayments {
                                results,
                                payment_id,
                                failed_paths_retry: if pending_amt_unsent != 0 {
-                                       if let Some(payment_params) = &route.payment_params {
+                                       if let Some(payment_params) = route.route_params.as_ref().map(|p| p.payment_params.clone()) {
                                                Some(RouteParameters {
-                                                       payment_params: payment_params.clone(),
+                                                       payment_params,
                                                        final_value_msat: pending_amt_unsent,
                                                })
                                        } else { None }
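
Since `Route` now records the `RouteParameters` it was built from instead of just the
`PaymentParameters`, the partial-failure path above derives retry parameters by mapping over the
optional `route_params` field. A small sketch of that access pattern with simplified stand-in
types (the real definitions live in `lightning::routing::router`):

    #[derive(Clone)]
    #[allow(dead_code)]
    struct PaymentParameters { max_path_count: u8 }

    #[derive(Clone)]
    #[allow(dead_code)]
    struct RouteParameters { payment_params: PaymentParameters, final_value_msat: u64 }

    #[allow(dead_code)]
    struct Route { route_params: Option<RouteParameters> }

    // Build retry parameters for the unsent portion from the route's own parameters, as in the
    // `failed_paths_retry` hunk above.
    fn retry_params(route: &Route, pending_amt_unsent: u64) -> Option<RouteParameters> {
        route.route_params.as_ref().map(|params| RouteParameters {
            payment_params: params.payment_params.clone(),
            final_value_msat: pending_amt_unsent,
        })
    }

    fn main() {
        let route = Route { route_params: None };
        // With no route parameters recorded there is nothing to retry against.
        assert!(retry_params(&route, 1_000).is_none());
    }
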
@@ -1256,19 +1426,19 @@ impl OutboundPayments {
                }
        }
 
-       pub(super) fn remove_stale_resolved_payments(&self,
-               pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
+       pub(super) fn remove_stale_payments(
+               &self, pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
        {
-               // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
-               // from the map. However, if we did that immediately when the last payment HTLC is claimed,
-               // this could race the user making a duplicate send_payment call and our idempotency
-               // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
-               // removal. This should be more than sufficient to ensure the idempotency of any
-               // `send_payment` calls that were made at the same time the `PaymentSent` event was being
-               // processed.
                let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
-               let pending_events = pending_events.lock().unwrap();
+               let mut pending_events = pending_events.lock().unwrap();
                pending_outbound_payments.retain(|payment_id, payment| {
+                       // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
+                       // from the map. However, if we did that immediately when the last payment HTLC is claimed,
+                       // this could race the user making a duplicate send_payment call and our idempotency
+                       // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
+                       // removal. This should be more than sufficient to ensure the idempotency of any
+                       // `send_payment` calls that were made at the same time the `PaymentSent` event was being
+                       // processed.
                        if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment {
                                let mut no_remaining_entries = session_privs.is_empty();
                                if no_remaining_entries {
@@ -1293,6 +1463,16 @@ impl OutboundPayments {
                                        *timer_ticks_without_htlcs = 0;
                                        true
                                }
+                       } else if let PendingOutboundPayment::AwaitingInvoice { timer_ticks_without_response, .. } = payment {
+                               *timer_ticks_without_response += 1;
+                               if *timer_ticks_without_response <= INVOICE_REQUEST_TIMEOUT_TICKS {
+                                       true
+                               } else {
+                                       pending_events.push_back(
+                                               (events::Event::InvoiceRequestFailed { payment_id: *payment_id }, None)
+                                       );
+                                       false
+                               }
                        } else { true }
                });
        }
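
The renamed `remove_stale_payments` now also ages out unanswered invoice requests: each timer
tick bumps `timer_ticks_without_response`, and once the counter passes
`INVOICE_REQUEST_TIMEOUT_TICKS` the entry is dropped and an `InvoiceRequestFailed` event is
queued. A self-contained sketch of that tick/retain loop; the types and the timeout value here
are illustrative, not the LDK ones:

    use std::collections::HashMap;

    const INVOICE_REQUEST_TIMEOUT_TICKS: u8 = 3; // illustrative value

    enum PendingPayment {
        AwaitingInvoice { timer_ticks_without_response: u8 },
    }

    #[derive(Debug, PartialEq)]
    enum Event { InvoiceRequestFailed { payment_id: u32 } }

    // Bump the counter on every payment still waiting for a BOLT 12 invoice; once it exceeds the
    // timeout, drop the entry and queue a failure event for the user.
    fn remove_stale_payments(pending: &mut HashMap<u32, PendingPayment>, events: &mut Vec<Event>) {
        pending.retain(|payment_id, payment| {
            let PendingPayment::AwaitingInvoice { timer_ticks_without_response } = payment;
            *timer_ticks_without_response += 1;
            if *timer_ticks_without_response <= INVOICE_REQUEST_TIMEOUT_TICKS {
                true
            } else {
                events.push(Event::InvoiceRequestFailed { payment_id: *payment_id });
                false
            }
        });
    }

    fn main() {
        let mut pending = HashMap::new();
        pending.insert(7, PendingPayment::AwaitingInvoice { timer_ticks_without_response: 0 });
        let mut events = Vec::new();
        for _ in 0..=INVOICE_REQUEST_TIMEOUT_TICKS { remove_stale_payments(&mut pending, &mut events); }
        assert_eq!(events, vec![Event::InvoiceRequestFailed { payment_id: 7 }]);
    }
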
@@ -1438,6 +1618,11 @@ impl OutboundPayments {
                                        }, None));
                                        payment.remove();
                                }
+                       } else if let PendingOutboundPayment::AwaitingInvoice { .. } = payment.get() {
+                               pending_events.lock().unwrap().push_back((events::Event::InvoiceRequestFailed {
+                                       payment_id,
+                               }, None));
+                               payment.remove();
                        }
                }
        }
@@ -1501,6 +1686,14 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment,
                (1, reason, option),
                (2, payment_hash, required),
        },
+       (5, AwaitingInvoice) => {
+               (0, timer_ticks_without_response, required),
+               (2, retry_strategy, required),
+       },
+       (7, InvoiceReceived) => {
+               (0, payment_hash, required),
+               (2, retry_strategy, required),
+       },
 );
 
 #[cfg(test)]
@@ -1513,7 +1706,10 @@ mod tests {
        use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
        use crate::ln::features::{ChannelFeatures, NodeFeatures};
        use crate::ln::msgs::{ErrorAction, LightningError};
-       use crate::ln::outbound_payment::{OutboundPayments, Retry, RetryableSendFailure};
+       use crate::ln::outbound_payment::{Bolt12PaymentError, INVOICE_REQUEST_TIMEOUT_TICKS, OutboundPayments, Retry, RetryableSendFailure};
+       use crate::offers::invoice::DEFAULT_RELATIVE_EXPIRY;
+       use crate::offers::offer::OfferBuilder;
+       use crate::offers::test_utils::*;
        use crate::routing::gossip::NetworkGraph;
        use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters};
        use crate::sync::{Arc, Mutex, RwLock};
@@ -1565,17 +1761,14 @@ mod tests {
                                PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()),
                                0
                        ).with_expiry_time(past_expiry_time);
-               let expired_route_params = RouteParameters {
-                       payment_params,
-                       final_value_msat: 0,
-               };
+               let expired_route_params = RouteParameters::from_payment_params_and_value(payment_params, 0);
                let pending_events = Mutex::new(VecDeque::new());
                if on_retry {
                        outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(),
-                               PaymentId([0; 32]), None, &Route { paths: vec![], payment_params: None },
+                               PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None },
                                Some(Retry::Attempts(1)), Some(expired_route_params.payment_params.clone()),
                                &&keys_manager, 0).unwrap();
-                       outbound_payments.retry_payment_internal(
+                       outbound_payments.find_route_and_send_payment(
                                PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![],
                                &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger, &pending_events,
                                &|_| Ok(()));
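
The tests above repeatedly swap a `RouteParameters { .. }` struct literal for
`RouteParameters::from_payment_params_and_value(..)`. Assuming the constructor simply packs the
same two fields, the two forms are equivalent, as this stand-in sketch illustrates:

    // Simplified stand-ins; the real types live in lightning::routing::router.
    #[derive(Clone, Debug, PartialEq)]
    struct PaymentParameters { final_cltv_expiry_delta: u32 }

    #[derive(Clone, Debug, PartialEq)]
    struct RouteParameters { payment_params: PaymentParameters, final_value_msat: u64 }

    impl RouteParameters {
        // Assumed shape of the constructor used throughout the tests: it just packs the two
        // fields the old struct-literal form spelled out by hand.
        fn from_payment_params_and_value(payment_params: PaymentParameters, final_value_msat: u64) -> Self {
            Self { payment_params, final_value_msat }
        }
    }

    fn main() {
        let params = PaymentParameters { final_cltv_expiry_delta: 42 };
        let literal = RouteParameters { payment_params: params.clone(), final_value_msat: 0 };
        let constructed = RouteParameters::from_payment_params_and_value(params, 0);
        assert_eq!(literal, constructed);
    }
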
@@ -1609,20 +1802,17 @@ mod tests {
 
                let payment_params = PaymentParameters::from_node_id(
                        PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0);
-               let route_params = RouteParameters {
-                       payment_params,
-                       final_value_msat: 0,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 0);
                router.expect_find_route(route_params.clone(),
                        Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }));
 
                let pending_events = Mutex::new(VecDeque::new());
                if on_retry {
                        outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(),
-                               PaymentId([0; 32]), None, &Route { paths: vec![], payment_params: None },
+                               PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None },
                                Some(Retry::Attempts(1)), Some(route_params.payment_params.clone()),
                                &&keys_manager, 0).unwrap();
-                       outbound_payments.retry_payment_internal(
+                       outbound_payments.find_route_and_send_payment(
                                PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![],
                                &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger, &pending_events,
                                &|_| Ok(()));
@@ -1652,10 +1842,7 @@ mod tests {
                let sender_pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let receiver_pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
                let payment_params = PaymentParameters::from_node_id(sender_pk, 0);
-               let route_params = RouteParameters {
-                       payment_params: payment_params.clone(),
-                       final_value_msat: 0,
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params.clone(), 0);
                let failed_scid = 42;
                let route = Route {
                        paths: vec![Path { hops: vec![RouteHop {
@@ -1666,7 +1853,7 @@ mod tests {
                                fee_msat: 0,
                                cltv_expiry_delta: 0,
                        }], blinded_tail: None }],
-                       payment_params: Some(payment_params),
+                       route_params: Some(route_params.clone()),
                };
                router.expect_find_route(route_params.clone(), Ok(route.clone()));
                let mut route_params_w_failed_scid = route_params.clone();
@@ -1719,4 +1906,293 @@ mod tests {
                } else { panic!("Unexpected event"); }
                if let Event::PaymentFailed { .. } = events[1].0 { } else { panic!("Unexpected event"); }
        }
+
+       #[test]
+       fn removes_stale_awaiting_invoice() {
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               for _ in 0..INVOICE_REQUEST_TIMEOUT_TICKS {
+                       outbound_payments.remove_stale_payments(&pending_events);
+                       assert!(outbound_payments.has_pending_payments());
+                       assert!(pending_events.lock().unwrap().is_empty());
+               }
+
+               outbound_payments.remove_stale_payments(&pending_events);
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::InvoiceRequestFailed { payment_id }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_err());
+       }
+
+       #[test]
+       fn removes_abandoned_awaiting_invoice() {
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               outbound_payments.abandon_payment(
+                       payment_id, PaymentFailureReason::UserAbandoned, &pending_events
+               );
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::InvoiceRequestFailed { payment_id }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[cfg(feature = "std")]
+       #[test]
+       fn fails_sending_payment_for_expired_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               let created_at = now() - DEFAULT_RELATIVE_EXPIRY;
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), created_at).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Ok(()),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+
+               let payment_hash = invoice.payment_hash();
+               let reason = Some(PaymentFailureReason::PaymentExpired);
+
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
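
The expired-invoice test works by dating the invoice `DEFAULT_RELATIVE_EXPIRY` in the past, so it
is already expired at send time and the payment fails with `PaymentExpired`. A tiny sketch of
that expiry arithmetic; the two-hour value and the inclusive comparison are assumptions for
illustration, not the LDK constants:

    use std::time::Duration;

    // Illustrative stand-in for DEFAULT_RELATIVE_EXPIRY (lightning::offers::invoice).
    const RELATIVE_EXPIRY: Duration = Duration::from_secs(2 * 60 * 60);

    // An invoice dated `relative_expiry` in the past has already hit its expiry when checked.
    fn is_expired(created_at: Duration, relative_expiry: Duration, now: Duration) -> bool {
        now >= created_at + relative_expiry
    }

    fn main() {
        let now = Duration::from_secs(1_700_000_000);
        let created_at = now - RELATIVE_EXPIRY; // mirrors `now() - DEFAULT_RELATIVE_EXPIRY` above
        assert!(is_expired(created_at, RELATIVE_EXPIRY, now));
    }
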
+
+       #[test]
+       fn fails_finding_route_for_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               router.expect_find_route(
+                       RouteParameters {
+                               payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                               final_value_msat: invoice.amount_msats(),
+                       },
+                       Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }),
+               );
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Ok(()),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+
+               let payment_hash = invoice.payment_hash();
+               let reason = Some(PaymentFailureReason::RouteNotFound);
+
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[test]
+       fn fails_paying_for_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                       final_value_msat: invoice.amount_msats(),
+               };
+               router.expect_find_route(
+                       route_params.clone(), Ok(Route { paths: vec![], route_params: Some(route_params) })
+               );
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Ok(()),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+
+               let payment_hash = invoice.payment_hash();
+               let reason = Some(PaymentFailureReason::UnexpectedError);
+
+               assert!(!pending_events.lock().unwrap().is_empty());
+               assert_eq!(
+                       pending_events.lock().unwrap().pop_front(),
+                       Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+               );
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
+
+       #[test]
+       fn sends_payment_for_bolt12_invoice() {
+               let logger = test_utils::TestLogger::new();
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+               let scorer = RwLock::new(test_utils::TestScorer::new());
+               let router = test_utils::TestRouter::new(network_graph, &scorer);
+               let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+               let pending_events = Mutex::new(VecDeque::new());
+               let outbound_payments = OutboundPayments::new();
+               let payment_id = PaymentId([0; 32]);
+
+               let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+                       .amount_msats(1000)
+                       .build().unwrap()
+                       .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+                       .build().unwrap()
+                       .sign(payer_sign).unwrap()
+                       .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+                       .build().unwrap()
+                       .sign(recipient_sign).unwrap();
+
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+                       final_value_msat: invoice.amount_msats(),
+               };
+               router.expect_find_route(
+                       route_params.clone(),
+                       Ok(Route {
+                               paths: vec![
+                                       Path {
+                                               hops: vec![
+                                                       RouteHop {
+                                                               pubkey: recipient_pubkey(),
+                                                               node_features: NodeFeatures::empty(),
+                                                               short_channel_id: 42,
+                                                               channel_features: ChannelFeatures::empty(),
+                                                               fee_msat: invoice.amount_msats(),
+                                                               cltv_expiry_delta: 0,
+                                                       }
+                                               ],
+                                               blinded_tail: None,
+                                       }
+                               ],
+                               route_params: Some(route_params),
+                       })
+               );
+
+               assert!(!outbound_payments.has_pending_payments());
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Err(Bolt12PaymentError::UnexpectedInvoice),
+               );
+               assert!(!outbound_payments.has_pending_payments());
+               assert!(pending_events.lock().unwrap().is_empty());
+
+               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(outbound_payments.has_pending_payments());
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| Ok(())
+                       ),
+                       Ok(()),
+               );
+               assert!(outbound_payments.has_pending_payments());
+               assert!(pending_events.lock().unwrap().is_empty());
+
+               assert_eq!(
+                       outbound_payments.send_payment_for_bolt12_invoice(
+                               &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+                               &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+                       ),
+                       Err(Bolt12PaymentError::DuplicateInvoice),
+               );
+               assert!(outbound_payments.has_pending_payments());
+               assert!(pending_events.lock().unwrap().is_empty());
+       }
 }
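
The last test above exercises three outcomes of `send_payment_for_bolt12_invoice`: an invoice
with no matching `AwaitingInvoice` entry is rejected as `UnexpectedInvoice`, the first invoice
for an outstanding request is accepted, and a repeat for the same payment id is rejected as
`DuplicateInvoice`. A simplified model of that dispatch (stand-in types, not the LDK method):

    use std::collections::HashMap;

    #[derive(Debug, PartialEq)]
    enum Bolt12PaymentError { UnexpectedInvoice, DuplicateInvoice }

    enum PendingPayment { AwaitingInvoice, Retryable }

    fn handle_invoice(pending: &mut HashMap<u32, PendingPayment>, id: u32) -> Result<(), Bolt12PaymentError> {
        let state = match pending.get_mut(&id) {
            // No outstanding invoice request for this payment id: the invoice was not asked for.
            None => return Err(Bolt12PaymentError::UnexpectedInvoice),
            Some(state) => state,
        };
        match state {
            // First invoice for an outstanding request: accept it and move on to sending HTLCs.
            PendingPayment::AwaitingInvoice => {
                *state = PendingPayment::Retryable;
                Ok(())
            },
            // A second invoice for the same payment id is ignored.
            PendingPayment::Retryable => Err(Bolt12PaymentError::DuplicateInvoice),
        }
    }

    fn main() {
        let mut pending = HashMap::new();
        assert_eq!(handle_invoice(&mut pending, 0), Err(Bolt12PaymentError::UnexpectedInvoice));
        pending.insert(0, PendingPayment::AwaitingInvoice);
        assert_eq!(handle_invoice(&mut pending, 0), Ok(()));
        assert_eq!(handle_invoice(&mut pending, 0), Err(Bolt12PaymentError::DuplicateInvoice));
    }
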
index f87fb867fc952bcd15856a601c3626aa1908acf9..0f78df5114ef164766796c9a1f95a3bc914591e0 100644 (file)
@@ -16,12 +16,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATE
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose};
-use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS};
-use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
+use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
+use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
 use crate::ln::features::Bolt11InvoiceFeatures;
 use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
 use crate::ln::msgs::ChannelMessageHandler;
-use crate::ln::outbound_payment::Retry;
+use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
 use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
 use crate::routing::scoring::ChannelUsage;
@@ -94,10 +94,7 @@ fn mpp_retry() {
 
        // Initiate the MPP payment.
        let payment_id = PaymentId(payment_hash.0);
-       let mut route_params = RouteParameters {
-               payment_params: route.payment_params.clone().unwrap(),
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = route.route_params.clone().unwrap();
 
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -257,10 +254,8 @@ fn do_test_keysend_payments(public_node: bool, with_retry: bool) {
        }
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[1].node.get_our_node_id();
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-               final_value_msat: 10000,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(
+               PaymentParameters::for_keysend(payee_pubkey, 40, false), 10000);
 
        let network_graph = nodes[0].network_graph.clone();
        let channels = nodes[0].node.list_usable_channels();
@@ -319,10 +314,8 @@ fn test_mpp_keysend() {
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[3].node.get_our_node_id();
        let recv_value = 15_000_000;
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, true),
-               final_value_msat: recv_value,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(
+               PaymentParameters::for_keysend(payee_pubkey, 40, true), recv_value);
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
        let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger,
@@ -531,10 +524,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let amt_msat = 1_000_000;
        let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
        let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
-       let route_params = RouteParameters {
-               payment_params: route.payment_params.clone().unwrap(),
-               final_value_msat: amt_msat,
-       };
+       let route_params = route.route_params.unwrap().clone();
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@ -673,9 +663,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
                        .unwrap().lock().unwrap();
                let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
-               let mut new_config = channel.context.config();
+               let mut new_config = channel.context().config();
                new_config.forwarding_fee_base_msat += 100_000;
-               channel.context.update_config(&new_config);
+               channel.context_mut().update_config(&new_config);
                new_route.paths[0].hops[0].fee_msat += 100_000;
        }
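
The `channel.context.config()` to `channel.context()`/`channel.context_mut()` changes in this
file reflect the tests going through accessor methods instead of a public `context` field. A
stand-in sketch of that accessor pattern (the real `Channel`/`ChannelContext` types are far more
involved):

    struct ChannelConfig { forwarding_fee_base_msat: u32 }

    struct ChannelContext { config: ChannelConfig }

    impl ChannelContext {
        fn config(&self) -> &ChannelConfig { &self.config }
        fn update_config(&mut self, new_config: ChannelConfig) { self.config = new_config; }
    }

    struct Channel { context: ChannelContext }

    impl Channel {
        // Read-only and mutable accessors replace direct `channel.context` field access.
        fn context(&self) -> &ChannelContext { &self.context }
        fn context_mut(&mut self) -> &mut ChannelContext { &mut self.context }
    }

    fn main() {
        let mut channel = Channel {
            context: ChannelContext { config: ChannelConfig { forwarding_fee_base_msat: 0 } },
        };
        let mut fee = channel.context().config().forwarding_fee_base_msat;
        fee += 100_000;
        channel.context_mut().update_config(ChannelConfig { forwarding_fee_base_msat: fee });
        assert_eq!(channel.context().config().forwarding_fee_base_msat, 100_000);
    }
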
 
@@ -1111,10 +1101,11 @@ fn get_ldk_payment_preimage() {
        let scorer = test_utils::TestScorer::new();
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
-       let route = get_route(
-               &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
-               Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-               amt_msat, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(),
+               Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), nodes[0].logger,
+               &scorer, &(), &random_seed_bytes).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
                RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@ -1224,7 +1215,7 @@ fn failed_probe_yields_event() {
 
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
 
-       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000);
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 9_998_000);
 
        let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
@@ -1273,7 +1264,7 @@ fn onchain_failed_probe_yields_event() {
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
 
        // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
-       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 1_000);
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 1_000);
        let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
        // node[0] -- update_add_htlcs -> node[1]
@@ -1478,7 +1469,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.context.get_short_channel_id().unwrap()
+                       channel_1.context().get_short_channel_id().unwrap()
                );
                assert_eq!(chan_1_used_liquidity, None);
        }
@@ -1490,7 +1481,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.context.get_short_channel_id().unwrap()
+                       channel_2.context().get_short_channel_id().unwrap()
                );
 
                assert_eq!(chan_2_used_liquidity, None);
@@ -1515,7 +1506,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.context.get_short_channel_id().unwrap()
+                       channel_1.context().get_short_channel_id().unwrap()
                );
                // First hop accounts for expected 1000 msat fee
                assert_eq!(chan_1_used_liquidity, Some(501000));
@@ -1528,7 +1519,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.context.get_short_channel_id().unwrap()
+                       channel_2.context().get_short_channel_id().unwrap()
                );
 
                assert_eq!(chan_2_used_liquidity, Some(500000));
@@ -1554,7 +1545,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.context.get_short_channel_id().unwrap()
+                       channel_1.context().get_short_channel_id().unwrap()
                );
                assert_eq!(chan_1_used_liquidity, None);
        }
@@ -1566,7 +1557,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.context.get_short_channel_id().unwrap()
+                       channel_2.context().get_short_channel_id().unwrap()
                );
                assert_eq!(chan_2_used_liquidity, None);
        }
@@ -1607,7 +1598,7 @@ fn test_holding_cell_inflight_htlcs() {
                let used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel.context.get_short_channel_id().unwrap()
+                       channel.context().get_short_channel_id().unwrap()
                );
 
                assert_eq!(used_liquidity, Some(2000000));
@@ -1661,15 +1652,10 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                        }])
                ]).unwrap()
                .with_bolt11_features(nodes[2].node.invoice_features()).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
-       let route = get_route(
-               &nodes[0].node.get_our_node_id(), &route_params.payment_params,
-               &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes,
-       ).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &(),
+               &random_seed_bytes).unwrap();
 
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
@@ -1705,7 +1691,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
        // Check for unknown channel id error.
        let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
        assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable  {
-               err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+               err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
                        log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
 
        if test == InterceptTest::Fail {
@@ -1731,8 +1717,8 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
                let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
                assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
-                       err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
-                               &temp_chan_id, nodes[2].node.get_our_node_id()) });
+                       err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
+                               temp_chan_id, nodes[2].node.get_our_node_id()) });
                assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
 
                // Open the just-in-time channel so the payment can then be forwarded.
@@ -1850,10 +1836,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_route_hints(route_hints).unwrap()
                .with_bolt11_features(nodes[2].node.invoice_features()).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
@@ -1980,10 +1963,7 @@ fn do_automatic_retries(test: AutoRetry) {
        let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
        let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
 
        macro_rules! pass_failed_attempt_with_retry_along_path {
@@ -2199,10 +2179,7 @@ fn auto_retry_partial_failure() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
        // second (for the initial send path2 over chan_2) fails.
@@ -2234,7 +2211,7 @@ fn auto_retry_partial_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(route_params.payment_params.clone()),
+               route_params: Some(route_params.clone()),
        };
        let retry_1_route = Route {
                paths: vec![
@@ -2255,7 +2232,7 @@ fn auto_retry_partial_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(route_params.payment_params.clone()),
+               route_params: Some(route_params.clone()),
        };
        let retry_2_route = Route {
                paths: vec![
@@ -2268,19 +2245,19 @@ fn auto_retry_partial_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(route_params.payment_params.clone()),
+               route_params: Some(route_params.clone()),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
        let mut payment_params = route_params.payment_params.clone();
        payment_params.previously_failed_channels.push(chan_2_id);
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params, final_value_msat: amt_msat / 2,
-               }, Ok(retry_1_route));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2),
+               Ok(retry_1_route));
        let mut payment_params = route_params.payment_params.clone();
        payment_params.previously_failed_channels.push(chan_3_id);
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params, final_value_msat: amt_msat / 4,
-               }, Ok(retry_2_route));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4),
+               Ok(retry_2_route));
 
        // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -2416,10 +2393,7 @@ fn auto_retry_zero_attempts_send_error() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
@@ -2456,10 +2430,7 @@ fn fails_paying_after_rejected_by_payee() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2503,10 +2474,8 @@ fn retry_multi_path_single_failed_payment() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params: payment_params.clone(),
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(
+               payment_params.clone(), amt_msat);
 
        let chans = nodes[0].node.list_usable_channels();
        let mut route = Route {
@@ -2528,20 +2497,19 @@ fn retry_multi_path_single_failed_payment() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(payment_params),
+               route_params: Some(route_params.clone()),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        // On retry, split the payment across both channels.
        route.paths[0].hops[0].fee_msat = 50_000_001;
        route.paths[1].hops[0].fee_msat = 50_000_000;
-       let mut pay_params = route.payment_params.clone().unwrap();
+       let mut pay_params = route.route_params.clone().unwrap().payment_params;
        pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: pay_params,
-                       // Note that the second request here requests the amount we originally failed to send,
-                       // not the amount remaining on the full payment, which should be changed.
-                       final_value_msat: 100_000_001,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               // Note that the second request here requests the amount we originally failed to send,
+               // not the amount remaining on the full payment, which should be changed.
+               RouteParameters::from_payment_params_and_value(pay_params, 100_000_001),
+               Ok(route.clone()));
 
        {
                let scorer = chanmon_cfgs[0].scorer.read().unwrap();
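
The other recurring change in this file is that `Route` now stores the full `RouteParameters` it was generated for (`route_params: Option<RouteParameters>`) rather than only `payment_params`, so the retry code pulls the payment parameters back out of `route_params`, as in the hunk above. A toy sketch of that access pattern, with stand-in types rather than the real router structs:

// Stand-in sketch of the field swap: a Route remembers the RouteParameters it was built
// for, and callers that previously read route.payment_params now go through route_params.
#[derive(Clone, Debug, PartialEq)]
struct PaymentParameters { previously_failed_channels: Vec<u64> }

#[derive(Clone, Debug, PartialEq)]
struct RouteParameters { payment_params: PaymentParameters, final_value_msat: u64 }

#[derive(Clone, Debug)]
struct Route { route_params: Option<RouteParameters> }

fn main() {
    let route = Route {
        route_params: Some(RouteParameters {
            payment_params: PaymentParameters { previously_failed_channels: Vec::new() },
            final_value_msat: 100_000_001,
        }),
    };
    // Mirrors the updated test: recover the payment parameters and mark a failed channel
    // before asking the router for a retry route.
    let mut pay_params = route.route_params.clone().unwrap().payment_params;
    pay_params.previously_failed_channels.push(42);
    assert_eq!(pay_params.previously_failed_channels, vec![42]);
}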
@@ -2596,10 +2564,7 @@ fn immediate_retry_on_failure() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let chans = nodes[0].node.list_usable_channels();
        let mut route = Route {
@@ -2613,7 +2578,9 @@ fn immediate_retry_on_failure() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None },
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       100_000_001)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        // On retry, split the payment across both channels.
@@ -2623,9 +2590,9 @@ fn immediate_retry_on_failure() {
        route.paths[1].hops[0].fee_msat = 50_000_001;
        let mut pay_params = route_params.payment_params.clone();
        pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: pay_params, final_value_msat: amt_msat,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(pay_params, amt_msat),
+               Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2684,10 +2651,7 @@ fn no_extra_retries_on_back_to_back_fail() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let mut route = Route {
                paths: vec![
@@ -2722,7 +2686,9 @@ fn no_extra_retries_on_back_to_back_fail() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None }
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       100_000_000)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        let mut second_payment_params = route_params.payment_params.clone();
@@ -2730,10 +2696,9 @@ fn no_extra_retries_on_back_to_back_fail() {
        // On retry, we'll only return one path
        route.paths.remove(1);
        route.paths[0].hops[1].fee_msat = amt_msat;
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: second_payment_params,
-                       final_value_msat: amt_msat,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat),
+               Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2886,10 +2851,7 @@ fn test_simple_partial_retry() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let mut route = Route {
                paths: vec![
@@ -2924,17 +2886,18 @@ fn test_simple_partial_retry() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None }
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       100_000_000)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
        let mut second_payment_params = route_params.payment_params.clone();
        second_payment_params.previously_failed_channels = vec![chan_2_scid];
        // On retry, we'll only be asked for one path (or 100k sats)
        route.paths.remove(0);
-       nodes[0].router.expect_find_route(RouteParameters {
-                       payment_params: second_payment_params,
-                       final_value_msat: amt_msat / 2,
-               }, Ok(route.clone()));
+       nodes[0].router.expect_find_route(
+               RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2),
+               Ok(route.clone()));
 
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -3052,10 +3015,7 @@ fn test_threaded_payment_retries() {
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let mut route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        let mut route = Route {
                paths: vec![
@@ -3090,7 +3050,9 @@ fn test_threaded_payment_retries() {
                                cltv_expiry_delta: 100,
                        }], blinded_tail: None }
                ],
-               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+               route_params: Some(RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       amt_msat - amt_msat / 1000)),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
 
@@ -3303,11 +3265,10 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
        create_announced_chan_between_nodes(&nodes, 2, 3);
 
        let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
-       let mut route_params = RouteParameters {
-               payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+       let mut route_params = RouteParameters::from_payment_params_and_value(
+               PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
                        .with_bolt11_features(nodes[1].node.invoice_features()).unwrap(),
-               final_value_msat: 10_000_000,
-       };
+               10_000_000);
        let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
                None, nodes[0].node.compute_inflight_htlcs()).unwrap();
        // Make sure the route is ordered as the B->D path before C->D
@@ -3517,10 +3478,7 @@ fn test_retry_custom_tlvs() {
 
        // Initiate the payment
        let payment_id = PaymentId(payment_hash.0);
-       let mut route_params = RouteParameters {
-               payment_params: route.payment_params.clone().unwrap(),
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = route.route_params.clone().unwrap();
 
        let custom_tlvs = vec![((1 << 16) + 1, vec![0x42u8; 16])];
        let onion_fields = RecipientOnionFields::secret_only(payment_secret);
@@ -3772,10 +3730,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
-       let mut route_params = RouteParameters {
-               payment_params,
-               final_value_msat: amt_msat,
-       };
+       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 
        // Send the MPP payment, delivering the updated commitment state to nodes[1].
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields {
index 7565246fe3473fb8b5a78b345436b93ffd357231..1e0294073a25d2b01b470bbbbad16d9f9ab3d305 100644 (file)
@@ -23,7 +23,7 @@ use crate::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessagePro
 use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
 use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use crate::util::ser::{VecWriter, Writeable, Writer};
 use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
@@ -483,7 +483,7 @@ struct Peer {
        /// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
        /// check this).
        their_features: Option<InitFeatures>,
-       their_net_address: Option<NetAddress>,
+       their_socket_address: Option<SocketAddress>,
 
        pending_outbound_buffer: LinkedList<Vec<u8>>,
        pending_outbound_buffer_first_msg_offset: usize,
@@ -855,28 +855,28 @@ impl core::fmt::Display for OptionalFromDebugger<'_> {
 /// A function used to filter out local or private addresses
 /// <https://www.iana.org./assignments/ipv4-address-space/ipv4-address-space.xhtml>
 /// <https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml>
-fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
+fn filter_addresses(ip_address: Option<SocketAddress>) -> Option<SocketAddress> {
        match ip_address{
                // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
-               Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [10, _, _, _], port: _}) => None,
                // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
-               Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [0, _, _, _], port: _}) => None,
                // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
-               Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [100, 64..=127, _, _], port: _}) => None,
                // For IPv4 range       127.0.0.0 - 127.255.255.255 (127/8)
-               Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [127, _, _, _], port: _}) => None,
                // For IPv4 range       169.254.0.0 - 169.254.255.255 (169.254/16)
-               Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [169, 254, _, _], port: _}) => None,
                // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
-               Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [172, 16..=31, _, _], port: _}) => None,
                // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
-               Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [192, 168, _, _], port: _}) => None,
                // For IPv4 range 192.88.99.0 - 192.88.99.255  (192.88.99/24)
-               Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
+               Some(SocketAddress::TcpIpV4{addr: [192, 88, 99, _], port: _}) => None,
                // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
-               Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
+               Some(SocketAddress::TcpIpV6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
                // For remaining addresses
-               Some(NetAddress::IPv6{addr: _, port: _}) => None,
+               Some(SocketAddress::TcpIpV6{addr: _, port: _}) => None,
                Some(..) => ip_address,
                None => None,
        }
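
The rename from `NetAddress` to `SocketAddress` (with `IPv4`/`IPv6` becoming `TcpIpV4`/`TcpIpV6`) is mechanical, but the filtering contract is worth restating: private or otherwise non-announceable ranges map to `None`, while globally routable IPv4 and 2000::/3 IPv6 addresses pass through. A self-contained sketch of that match, using a simplified stand-in enum rather than the real `lightning::ln::msgs::SocketAddress`:

// Simplified stand-in for the real SocketAddress enum; only the two TCP/IP variants the
// filter inspects are modelled here.
#[derive(Clone, Debug, PartialEq)]
enum SocketAddress {
    TcpIpV4 { addr: [u8; 4], port: u16 },
    TcpIpV6 { addr: [u8; 16], port: u16 },
}

// Same shape as the filter in the hunk above: reserved ranges are dropped, only globally
// routable IPv4 and 2000::/3 IPv6 addresses are kept.
fn filter_addresses(ip_address: Option<SocketAddress>) -> Option<SocketAddress> {
    match ip_address {
        Some(SocketAddress::TcpIpV4 { addr: [10, _, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [0, _, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [100, 64..=127, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [127, _, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [169, 254, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [172, 16..=31, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [192, 168, _, _], .. }) => None,
        Some(SocketAddress::TcpIpV4 { addr: [192, 88, 99, _], .. }) => None,
        Some(SocketAddress::TcpIpV6 { addr: [0x20..=0x3F, ..], .. }) => ip_address,
        Some(SocketAddress::TcpIpV6 { .. }) => None,
        other => other,
    }
}

fn main() {
    let private = SocketAddress::TcpIpV4 { addr: [192, 168, 1, 10], port: 9735 };
    let public = SocketAddress::TcpIpV4 { addr: [203, 0, 113, 7], port: 9735 };
    assert_eq!(filter_addresses(Some(private)), None);
    assert_eq!(filter_addresses(Some(public.clone())), Some(public));
}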
@@ -933,14 +933,14 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        ///
        /// The returned `Option`s will only be `Some` if an address had been previously given via
        /// [`Self::new_outbound_connection`] or [`Self::new_inbound_connection`].
-       pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<NetAddress>)> {
+       pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<SocketAddress>)> {
                let peers = self.peers.read().unwrap();
                peers.values().filter_map(|peer_mutex| {
                        let p = peer_mutex.lock().unwrap();
                        if !p.handshake_complete() {
                                return None;
                        }
-                       Some((p.their_node_id.unwrap().0, p.their_net_address.clone()))
+                       Some((p.their_node_id.unwrap().0, p.their_socket_address.clone()))
                }).collect()
        }
 
@@ -973,7 +973,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// [`socket_disconnected`].
        ///
        /// [`socket_disconnected`]: PeerManager::socket_disconnected
-       pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
+       pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<SocketAddress>) -> Result<Vec<u8>, PeerHandleError> {
                let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
                let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
                let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
@@ -989,7 +989,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        channel_encryptor: peer_encryptor,
                                        their_node_id: None,
                                        their_features: None,
-                                       their_net_address: remote_network_address,
+                                       their_socket_address: remote_network_address,
 
                                        pending_outbound_buffer: LinkedList::new(),
                                        pending_outbound_buffer_first_msg_offset: 0,
@@ -1030,7 +1030,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// [`socket_disconnected`].
        ///
        /// [`socket_disconnected`]: PeerManager::socket_disconnected
-       pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
+       pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<SocketAddress>) -> Result<(), PeerHandleError> {
                let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.node_signer);
                let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
 
@@ -1045,7 +1045,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        channel_encryptor: peer_encryptor,
                                        their_node_id: None,
                                        their_features: None,
-                                       their_net_address: remote_network_address,
+                                       their_socket_address: remote_network_address,
 
                                        pending_outbound_buffer: LinkedList::new(),
                                        pending_outbound_buffer_first_msg_offset: 0,
@@ -1368,7 +1368,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                insert_node_id!();
                                                                let features = self.init_features(&their_node_id);
                                                                let networks = self.message_handler.chan_handler.get_genesis_hashes();
-                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
                                                        },
@@ -1381,7 +1381,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                insert_node_id!();
                                                                let features = self.init_features(&their_node_id);
                                                                let networks = self.message_handler.chan_handler.get_genesis_hashes();
-                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+                                                               let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
                                                        },
@@ -2399,7 +2399,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        // be absurd. We ensure this by checking that at least 100 (our stated public contract on when
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
-       const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+       const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
        #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
@@ -2421,7 +2421,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// Panics if `addresses` is absurdly large (more than 100).
        ///
        /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
-       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
+       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<SocketAddress>) {
                if addresses.len() > 100 {
                        panic!("More than half the message size was taken up by public addresses!");
                }
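
The doc comment and guard above encode the public contract that `broadcast_node_announcement` accepts at most 100 addresses, backed by the compile-time `HALF_MESSAGE_IS_ADDRS` check a few lines earlier. A back-of-the-envelope sketch of that arithmetic, where the 258-byte maximum serialized address length is an assumed value standing in for `SocketAddress::MAX_LEN` and the free function is a simplified stand-in for the real method:

// Assumed maximum serialized address length, for illustration only.
const MAX_ADDR_LEN: u32 = 258;
const HALF_MESSAGE_IS_ADDRS: u32 = u16::MAX as u32 / (MAX_ADDR_LEN + 1) / 2;

fn broadcast_node_announcement(addresses: &[([u8; 4], u16)]) {
    // Same guard as in the hunk above: callers may pass at most 100 addresses.
    if addresses.len() > 100 {
        panic!("More than half the message size was taken up by public addresses!");
    }
    println!("announcing {} address(es)", addresses.len());
}

fn main() {
    // Under the assumed length, more than 100 maximum-length addresses still fit in half
    // of a 64KB message, so the stated contract of 100 is conservative.
    assert!(HALF_MESSAGE_IS_ADDRS >= 100);
    broadcast_node_announcement(&[([203, 0, 113, 7], 9735)]);
}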
@@ -2488,7 +2488,7 @@ mod tests {
        use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
        use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
        use crate::ln::{msgs, wire};
-       use crate::ln::msgs::{LightningError, NetAddress};
+       use crate::ln::msgs::{LightningError, SocketAddress};
        use crate::util::test_utils;
 
        use bitcoin::Network;
@@ -2647,13 +2647,13 @@ mod tests {
                        fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                        disconnect: Arc::new(AtomicBool::new(false)),
                };
-               let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+               let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
                let mut fd_b = FileDescriptor {
                        fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                        disconnect: Arc::new(AtomicBool::new(false)),
                };
-               let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+               let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
@@ -2698,12 +2698,12 @@ mod tests {
                                                fd: $id  + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                                disconnect: Arc::new(AtomicBool::new(false)),
                                        };
-                                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                                       let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                                        let mut fd_b = FileDescriptor {
                                                fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                                disconnect: Arc::new(AtomicBool::new(false)),
                                        };
-                                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                                       let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                                        let initial_data = peers[1].new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                                        peers[0].new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                                        if peers[0].read_event(&mut fd_a, &initial_data).is_err() { break; }
@@ -2770,12 +2770,12 @@ mod tests {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                       let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                        let mut fd_b = FileDescriptor {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                       let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                        let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                        peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                        assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
@@ -2806,12 +2806,12 @@ mod tests {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                       let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
                        let mut fd_b = FileDescriptor {
                                fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
                                disconnect: Arc::new(AtomicBool::new(false)),
                        };
-                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                       let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
                        let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
                        peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
                        assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
@@ -2888,7 +2888,7 @@ mod tests {
                        fd: 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
                        disconnect: Arc::new(AtomicBool::new(false)),
                };
-               let addr_dup = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1003};
+               let addr_dup = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1003};
                let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap();
                peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap();
 
@@ -3026,91 +3026,91 @@ mod tests {
                // Tests the filter_addresses function.
 
                // For (10/8)
-               let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 255, 201], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [10, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (0/8)
-               let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 255, 187], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [0, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (100.64/10)
-               let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [100, 64, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [100, 78, 255, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [100, 127, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (127/8)
-               let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [127, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [127, 65, 73, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [127, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (169.254/16)
-               let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 221, 101], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (172.16/12)
-               let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [172, 16, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [172, 27, 101, 23], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [172, 31, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (192.168/16)
-               let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 205, 159], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (192.88.99/24)
-               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 140], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For other IPv4 addresses
-               let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [188, 255, 99, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [123, 8, 129, 14], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV4{addr: [2, 88, 9, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
 
                // For (2000::/3)
-               let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
-               let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
 
                // For other IPv6 addresses
-               let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
-               let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
+               let ip_address = SocketAddress::TcpIpV6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
                assert_eq!(filter_addresses(Some(ip_address.clone())), None);
 
                // For (None)
index c452630e184529969dd6f45a5f33fd5f9fd4b0f0..eda517e087eb56a12e23ed68be2d6efbf43d1f45 100644 (file)
@@ -18,7 +18,7 @@ use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, Mes
 use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -399,7 +399,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_stale_monitors = Vec::new();
        for serialized in node_0_stale_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_stale_monitors.push(monitor);
        }
@@ -407,7 +407,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_monitors = Vec::new();
        for serialized in node_0_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_monitors.push(monitor);
        }
index f975e18412967bc07a6058572091cf0377eead1f..7216ccc2d3d59e3840d4b05fa79022971f08bc2b 100644 (file)
@@ -13,7 +13,7 @@ use crate::sign::{EntropySource, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, ChannelShutdownState, ChannelDetails};
-use crate::routing::router::{PaymentParameters, get_route};
+use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
 use crate::ln::script::ShutdownScript;
@@ -313,9 +313,14 @@ fn updates_shutdown_wait() {
        let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
 
        let payment_params_1 = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
-       let route_1 = get_route(&nodes[0].node.get_our_node_id(), &payment_params_1, &nodes[0].network_graph.read_only(), None, 100000, &logger, &scorer, &(), &random_seed_bytes).unwrap();
-       let payment_params_2 = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
-       let route_2 = get_route(&nodes[1].node.get_our_node_id(), &payment_params_2, &nodes[1].network_graph.read_only(), None, 100000, &logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params_1, 100_000);
+       let route_1 = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(), None, &logger, &scorer, &(), &random_seed_bytes).unwrap();
+       let payment_params_2 = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(),
+               TEST_FINAL_CLTV).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params_2, 100_000);
+       let route_2 = get_route(&nodes[1].node.get_our_node_id(), &route_params,
+               &nodes[1].network_graph.read_only(), None, &logger, &scorer, &(), &random_seed_bytes).unwrap();
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route_1, payment_hash,
                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
                ), true, APIError::ChannelUnavailable {..}, {});
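
Beyond the feature rename, this hunk reflects `get_route` now taking a `&RouteParameters` instead of a separate `PaymentParameters` reference plus an amount argument. A simplified, standalone illustration of that consolidation, with placeholder types and a placeholder router rather than the real API:

// Stand-in sketch of the signature change: the amount no longer travels as its own
// argument, it rides inside RouteParameters.
#[derive(Debug)]
struct PaymentParameters;

#[derive(Debug)]
struct RouteParameters { payment_params: PaymentParameters, final_value_msat: u64 }

struct Route { total_value_msat: u64 }

fn get_route(route_params: &RouteParameters) -> Result<Route, ()> {
    // A real router would also take the network graph, scorer and random seed; this
    // placeholder just echoes the requested amount back.
    Ok(Route { total_value_msat: route_params.final_value_msat })
}

fn main() {
    let route_params = RouteParameters {
        payment_params: PaymentParameters,
        final_value_msat: 100_000,
    };
    println!("requesting a route for {:?}", route_params);
    let route = get_route(&route_params).unwrap();
    assert_eq!(route.total_value_msat, 100_000);
}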
@@ -1057,7 +1062,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                {
                        let mut node_0_per_peer_lock;
                        let mut node_0_peer_state_lock;
-                       get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context.closing_fee_limits.as_mut().unwrap().1 *= 10;
+                       get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10;
                }
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
                let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
index 75a844cd117abe5d956ddcbdf7efde5bff44a769..908d2d4bee6d2f6b9091ae0d62ce7e650cbfe754 100644 (file)
@@ -110,6 +110,7 @@ use core::time::Duration;
 use crate::io;
 use crate::blinded_path::BlindedPath;
 use crate::ln::PaymentHash;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures, InvoiceRequestFeatures, OfferFeatures};
 use crate::ln::inbound_payment::ExpandedKey;
 use crate::ln::msgs::DecodeError;
@@ -128,7 +129,7 @@ use crate::prelude::*;
 #[cfg(feature = "std")]
 use std::time::SystemTime;
 
-const DEFAULT_RELATIVE_EXPIRY: Duration = Duration::from_secs(7200);
+pub(crate) const DEFAULT_RELATIVE_EXPIRY: Duration = Duration::from_secs(7200);
 
 /// Tag for the hash function used when signing a [`Bolt12Invoice`]'s merkle root.
 pub const SIGNATURE_TAG: &'static str = concat!("lightning", "invoice", "signature");
@@ -695,10 +696,11 @@ impl Bolt12Invoice {
                merkle::message_digest(SIGNATURE_TAG, &self.bytes).as_ref().clone()
        }
 
-       /// Verifies that the invoice was for a request or refund created using the given key.
+       /// Verifies that the invoice was for a request or refund created using the given key. Returns
+       /// the associated [`PaymentId`] to use when sending the payment.
        pub fn verify<T: secp256k1::Signing>(
                &self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
-       ) -> bool {
+       ) -> Result<PaymentId, ()> {
                self.contents.verify(TlvStream::new(&self.bytes), key, secp_ctx)
        }
 
@@ -947,7 +949,7 @@ impl InvoiceContents {
 
        fn verify<T: secp256k1::Signing>(
                &self, tlv_stream: TlvStream<'_>, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
-       ) -> bool {
+       ) -> Result<PaymentId, ()> {
                let offer_records = tlv_stream.clone().range(OFFER_TYPES);
                let invreq_records = tlv_stream.range(INVOICE_REQUEST_TYPES).filter(|record| {
                        match record.r#type {
@@ -967,10 +969,7 @@ impl InvoiceContents {
                        },
                };
 
-               match signer::verify_metadata(metadata, key, iv_bytes, payer_id, tlv_stream, secp_ctx) {
-                       Ok(_) => true,
-                       Err(()) => false,
-               }
+               signer::verify_payer_metadata(metadata, key, iv_bytes, payer_id, tlv_stream, secp_ctx)
        }
 
        fn derives_keys(&self) -> bool {
@@ -1642,36 +1641,31 @@ mod tests {
                        .build().unwrap()
                        .sign(payer_sign).unwrap();
 
-               if let Err(e) = invoice_request
-                       .verify_and_respond_using_derived_keys_no_std(
-                               payment_paths(), payment_hash(), now(), &expanded_key, &secp_ctx
-                       )
-                       .unwrap()
+               if let Err(e) = invoice_request.clone()
+                       .verify(&expanded_key, &secp_ctx).unwrap()
+                       .respond_using_derived_keys_no_std(payment_paths(), payment_hash(), now()).unwrap()
                        .build_and_sign(&secp_ctx)
                {
                        panic!("error building invoice: {:?}", e);
                }
 
                let expanded_key = ExpandedKey::new(&KeyMaterial([41; 32]));
-               match invoice_request.verify_and_respond_using_derived_keys_no_std(
-                       payment_paths(), payment_hash(), now(), &expanded_key, &secp_ctx
-               ) {
-                       Ok(_) => panic!("expected error"),
-                       Err(e) => assert_eq!(e, Bolt12SemanticError::InvalidMetadata),
-               }
+               assert!(invoice_request.verify(&expanded_key, &secp_ctx).is_err());
 
                let desc = "foo".to_string();
                let offer = OfferBuilder
                        ::deriving_signing_pubkey(desc, node_id, &expanded_key, &entropy, &secp_ctx)
                        .amount_msats(1000)
+                       // Omit the path so that node_id is used for the signing pubkey instead of deriving
                        .build().unwrap();
                let invoice_request = offer.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
                        .build().unwrap()
                        .sign(payer_sign).unwrap();
 
-               match invoice_request.verify_and_respond_using_derived_keys_no_std(
-                       payment_paths(), payment_hash(), now(), &expanded_key, &secp_ctx
-               ) {
+               match invoice_request
+                       .verify(&expanded_key, &secp_ctx).unwrap()
+                       .respond_using_derived_keys_no_std(payment_paths(), payment_hash(), now())
+               {
                        Ok(_) => panic!("expected error"),
                        Err(e) => assert_eq!(e, Bolt12SemanticError::InvalidMetadata),
                }
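
The updated test shows the new response flow end to end: `verify` consumes the `InvoiceRequest`, and the verified request exposes `respond_using_derived_keys_no_std`, whose builder is then finished with `build_and_sign`. A toy model of that call chain, where every type and method body is a stand-in rather than the real offers API:

struct InvoiceRequest { metadata_valid: bool }
struct VerifiedInvoiceRequest;
struct InvoiceBuilder;
struct Bolt12Invoice;

impl InvoiceRequest {
    // Consuming verify: an invalid payer metadata check fails the whole chain early.
    fn verify(self) -> Result<VerifiedInvoiceRequest, ()> {
        if self.metadata_valid { Ok(VerifiedInvoiceRequest) } else { Err(()) }
    }
}

impl VerifiedInvoiceRequest {
    fn respond_using_derived_keys_no_std(&self) -> Result<InvoiceBuilder, ()> {
        Ok(InvoiceBuilder)
    }
}

impl InvoiceBuilder {
    fn build_and_sign(self) -> Result<Bolt12Invoice, ()> {
        Ok(Bolt12Invoice)
    }
}

fn main() {
    let request = InvoiceRequest { metadata_valid: true };
    let _invoice = request
        .verify().unwrap()
        .respond_using_derived_keys_no_std().unwrap()
        .build_and_sign().unwrap();
    println!("responded to a verified invoice request");
}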
index 03af068d1d61912738f23ac71ff3c91079161ffe..fb0b0205bd689e1356b65ae5f7c79690479a55f5 100644 (file)
@@ -64,6 +64,7 @@ use crate::sign::EntropySource;
 use crate::io;
 use crate::blinded_path::BlindedPath;
 use crate::ln::PaymentHash;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::InvoiceRequestFeatures;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::ln::msgs::DecodeError;
@@ -128,10 +129,12 @@ impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerI
        }
 
        pub(super) fn deriving_metadata<ES: Deref>(
-               offer: &'a Offer, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES
+               offer: &'a Offer, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
+               payment_id: PaymentId,
        ) -> Self where ES::Target: EntropySource {
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let payment_id = Some(payment_id);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, payment_id);
                let metadata = Metadata::Derived(derivation_material);
                Self {
                        offer,
@@ -145,10 +148,12 @@ impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerI
 
 impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T> {
        pub(super) fn deriving_payer_id<ES: Deref>(
-               offer: &'a Offer, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>
+               offer: &'a Offer, expanded_key: &ExpandedKey, entropy_source: ES,
+               secp_ctx: &'b Secp256k1<T>, payment_id: PaymentId
        ) -> Self where ES::Target: EntropySource {
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let payment_id = Some(payment_id);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, payment_id);
                let metadata = Metadata::DerivedSigningPubkey(derivation_material);
                Self {
                        offer,
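
These two hunks thread a `PaymentId` into the payer metadata: `MetadataMaterial::new` now also takes an `Option<PaymentId>`, which is what later lets invoice verification hand the id back. A rough standalone sketch of that plumbing only, with stand-in types (the real constructor also takes the `ExpandedKey` and IV bytes used to derive and authenticate the metadata):

#[derive(Clone, Copy, Debug, PartialEq)]
struct PaymentId([u8; 32]);

#[derive(Clone, Copy, Debug)]
struct Nonce([u8; 16]);

#[derive(Debug)]
struct MetadataMaterial { nonce: Nonce, payment_id: Option<PaymentId> }

impl MetadataMaterial {
    // Mirrors the extra Option<PaymentId> argument added above.
    fn new(nonce: Nonce, payment_id: Option<PaymentId>) -> Self {
        Self { nonce, payment_id }
    }
}

fn main() {
    let payment_id = Some(PaymentId([1; 32]));
    let material = MetadataMaterial::new(Nonce([7; 16]), payment_id);
    assert_eq!(material.payment_id, payment_id);
    println!("{:?}", material);
}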
@@ -259,7 +264,7 @@ impl<'a, 'b, P: PayerIdStrategy, T: secp256k1::Signing> InvoiceRequestBuilder<'a
                        let mut tlv_stream = self.invoice_request.as_tlv_stream();
                        debug_assert!(tlv_stream.2.payer_id.is_none());
                        tlv_stream.0.metadata = None;
-                       if !metadata.derives_keys() {
+                       if !metadata.derives_payer_keys() {
                                tlv_stream.2.payer_id = self.payer_id.as_ref();
                        }
 
@@ -424,6 +429,24 @@ pub struct InvoiceRequest {
        signature: Signature,
 }
 
+/// An [`InvoiceRequest`] that has been verified by [`InvoiceRequest::verify`] and exposes different
+/// ways to respond depending on whether the signing keys were derived.
+#[derive(Clone, Debug)]
+pub struct VerifiedInvoiceRequest {
+       /// The verified request.
+       inner: InvoiceRequest,
+
+       /// Keys used for signing a [`Bolt12Invoice`] if they can be derived.
+       ///
+       /// If `Some`, must call [`respond_using_derived_keys`] when responding. Otherwise, call
+       /// [`respond_with`].
+       ///
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+       /// [`respond_using_derived_keys`]: Self::respond_using_derived_keys
+       /// [`respond_with`]: Self::respond_with
+       pub keys: Option<KeyPair>,
+}
+
 /// The contents of an [`InvoiceRequest`], which may be shared with an [`Bolt12Invoice`].
 ///
 /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
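
Per the doc comment above, `VerifiedInvoiceRequest::keys` determines which response path applies: `Some` means derived keys can sign the invoice, `None` means the caller must respond and sign explicitly. A simplified stand-in of that dispatch, mirroring the `InvalidMetadata` error the real `respond_using_derived_keys_no_std` returns when no keys were derived:

#[derive(Debug, PartialEq)]
enum Bolt12SemanticError { InvalidMetadata }

#[derive(Clone, Copy)]
struct KeyPair;

struct VerifiedInvoiceRequest { keys: Option<KeyPair> }

impl VerifiedInvoiceRequest {
    fn respond_using_derived_keys(&self) -> Result<&'static str, Bolt12SemanticError> {
        match self.keys {
            Some(_keys) => Ok("invoice builder signing with the derived pubkey"),
            None => Err(Bolt12SemanticError::InvalidMetadata),
        }
    }
}

fn main() {
    let derived = VerifiedInvoiceRequest { keys: Some(KeyPair) };
    assert!(derived.respond_using_derived_keys().is_ok());

    let explicit = VerifiedInvoiceRequest { keys: None };
    assert_eq!(
        explicit.respond_using_derived_keys().unwrap_err(),
        Bolt12SemanticError::InvalidMetadata
    );
}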
@@ -542,9 +565,15 @@ impl InvoiceRequest {
        ///
        /// Errors if the request contains unknown required features.
        ///
+       /// # Note
+       ///
+       /// If the originating [`Offer`] was created using [`OfferBuilder::deriving_signing_pubkey`],
+       /// then use [`InvoiceRequest::verify`] and [`VerifiedInvoiceRequest`] methods instead.
+       ///
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Bolt12Invoice::created_at`]: crate::offers::invoice::Bolt12Invoice::created_at
+       /// [`OfferBuilder::deriving_signing_pubkey`]: crate::offers::offer::OfferBuilder::deriving_signing_pubkey
        pub fn respond_with_no_std(
                &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
                created_at: core::time::Duration
@@ -556,6 +585,63 @@ impl InvoiceRequest {
                InvoiceBuilder::for_offer(self, payment_paths, created_at, payment_hash)
        }
 
+       /// Verifies that the request was for an offer created using the given key. Returns the verified
+       /// request which contains the derived keys needed to sign a [`Bolt12Invoice`] for the request
+       /// if they could be extracted from the metadata.
+       ///
+       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+       pub fn verify<T: secp256k1::Signing>(
+               self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+       ) -> Result<VerifiedInvoiceRequest, ()> {
+               let keys = self.contents.inner.offer.verify(&self.bytes, key, secp_ctx)?;
+               Ok(VerifiedInvoiceRequest {
+                       inner: self,
+                       keys,
+               })
+       }
+
+       #[cfg(test)]
+       fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
+               let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream) =
+                       self.contents.as_tlv_stream();
+               let signature_tlv_stream = SignatureTlvStreamRef {
+                       signature: Some(&self.signature),
+               };
+               (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream, signature_tlv_stream)
+       }
+}
+
+impl VerifiedInvoiceRequest {
+       offer_accessors!(self, self.inner.contents.inner.offer);
+       invoice_request_accessors!(self, self.inner.contents);
+
+       /// Creates an [`InvoiceBuilder`] for the request with the given required fields and using the
+       /// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
+       ///
+       /// See [`InvoiceRequest::respond_with_no_std`] for further details.
+       ///
+       /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+       ///
+       /// [`Duration`]: core::time::Duration
+       #[cfg(feature = "std")]
+       pub fn respond_with(
+               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
+       ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
+               self.inner.respond_with(payment_paths, payment_hash)
+       }
+
+       /// Creates an [`InvoiceBuilder`] for the request with the given required fields.
+       ///
+       /// See [`InvoiceRequest::respond_with_no_std`] for further details.
+       ///
+       /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
+       pub fn respond_with_no_std(
+               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+               created_at: core::time::Duration
+       ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
+               self.inner.respond_with_no_std(payment_paths, payment_hash, created_at)
+       }
+
        /// Creates an [`InvoiceBuilder`] for the request using the given required fields and that uses
        /// derived signing keys from the originating [`Offer`] to sign the [`Bolt12Invoice`]. Must use
        /// the same [`ExpandedKey`] as the one used to create the offer.
@@ -566,17 +652,14 @@ impl InvoiceRequest {
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        #[cfg(feature = "std")]
-       pub fn verify_and_respond_using_derived_keys<T: secp256k1::Signing>(
-               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
-               expanded_key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+       pub fn respond_using_derived_keys(
+               &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
        ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError> {
                let created_at = std::time::SystemTime::now()
                        .duration_since(std::time::SystemTime::UNIX_EPOCH)
                        .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
 
-               self.verify_and_respond_using_derived_keys_no_std(
-                       payment_paths, payment_hash, created_at, expanded_key, secp_ctx
-               )
+               self.respond_using_derived_keys_no_std(payment_paths, payment_hash, created_at)
        }
 
        /// Creates an [`InvoiceBuilder`] for the request using the given required fields and that uses
@@ -588,42 +671,22 @@ impl InvoiceRequest {
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-       pub fn verify_and_respond_using_derived_keys_no_std<T: secp256k1::Signing>(
+       pub fn respond_using_derived_keys_no_std(
                &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
-               created_at: core::time::Duration, expanded_key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+               created_at: core::time::Duration
        ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError> {
-               if self.invoice_request_features().requires_unknown_bits() {
+               if self.inner.invoice_request_features().requires_unknown_bits() {
                        return Err(Bolt12SemanticError::UnknownRequiredFeatures);
                }
 
-               let keys = match self.verify(expanded_key, secp_ctx) {
-                       Err(()) => return Err(Bolt12SemanticError::InvalidMetadata),
-                       Ok(None) => return Err(Bolt12SemanticError::InvalidMetadata),
-                       Ok(Some(keys)) => keys,
+               let keys = match self.keys {
+                       None => return Err(Bolt12SemanticError::InvalidMetadata),
+                       Some(keys) => keys,
                };
 
-               InvoiceBuilder::for_offer_using_keys(self, payment_paths, created_at, payment_hash, keys)
-       }
-
-       /// Verifies that the request was for an offer created using the given key. Returns the derived
-       /// keys need to sign an [`Bolt12Invoice`] for the request if they could be extracted from the
-       /// metadata.
-       ///
-       /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-       pub fn verify<T: secp256k1::Signing>(
-               &self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
-       ) -> Result<Option<KeyPair>, ()> {
-               self.contents.inner.offer.verify(&self.bytes, key, secp_ctx)
-       }
-
-       #[cfg(test)]
-       fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
-               let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream) =
-                       self.contents.as_tlv_stream();
-               let signature_tlv_stream = SignatureTlvStreamRef {
-                       signature: Some(&self.signature),
-               };
-               (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream, signature_tlv_stream)
+               InvoiceBuilder::for_offer_using_keys(
+                       &self.inner, payment_paths, created_at, payment_hash, keys
+               )
        }
 }
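
// Illustrative sketch, not part of the diff: how a recipient might drive the new
// verify-then-respond flow shown above. `respond_to_request` and its parameter plumbing are
// made up for this example; the LDK names (`InvoiceRequest::verify`,
// `VerifiedInvoiceRequest::respond_using_derived_keys_no_std`, `build_and_sign`) come from
// this change set.
use bitcoin::secp256k1::{self, Secp256k1};
use core::time::Duration;
use lightning::blinded_path::BlindedPath;
use lightning::ln::PaymentHash;
use lightning::ln::inbound_payment::ExpandedKey;
use lightning::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
use lightning::offers::invoice_request::InvoiceRequest;
use lightning::offers::parse::Bolt12SemanticError;

fn respond_to_request<T: secp256k1::Signing>(
    request: InvoiceRequest, expanded_key: &ExpandedKey, secp_ctx: &Secp256k1<T>,
    payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
    created_at: Duration,
) -> Result<Bolt12Invoice, Bolt12SemanticError> {
    // `verify` now consumes the request and yields a `VerifiedInvoiceRequest`, so the
    // derived keys do not need to be re-verified when building the response.
    let verified = request.verify(expanded_key, secp_ctx)
        .map_err(|()| Bolt12SemanticError::InvalidMetadata)?;

    // For offers built with derived signing keys this signs the invoice directly; it returns
    // `InvalidMetadata` if no keys could be derived, in which case `respond_with_no_std`
    // plus an explicit signature would be used instead.
    verified
        .respond_using_derived_keys_no_std(payment_paths, payment_hash, created_at)?
        .build_and_sign(secp_ctx)
}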
 
@@ -633,7 +696,7 @@ impl InvoiceRequestContents {
        }
 
        pub(super) fn derives_keys(&self) -> bool {
-               self.inner.payer.0.derives_keys()
+               self.inner.payer.0.derives_payer_keys()
        }
 
        pub(super) fn chain(&self) -> ChainHash {
@@ -866,6 +929,7 @@ mod tests {
        #[cfg(feature = "std")]
        use core::time::Duration;
        use crate::sign::KeyMaterial;
+       use crate::ln::channelmanager::PaymentId;
        use crate::ln::features::{InvoiceRequestFeatures, OfferFeatures};
        use crate::ln::inbound_payment::ExpandedKey;
        use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -1011,12 +1075,13 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let offer = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .amount_msats(1000)
                        .build().unwrap();
                let invoice_request = offer
-                       .request_invoice_deriving_metadata(payer_id, &expanded_key, &entropy)
+                       .request_invoice_deriving_metadata(payer_id, &expanded_key, &entropy, payment_id)
                        .unwrap()
                        .build().unwrap()
                        .sign(payer_sign).unwrap();
@@ -1026,7 +1091,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                // Fails verification with altered fields
                let (
@@ -1049,7 +1117,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered metadata
                let (
@@ -1072,7 +1140,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
@@ -1080,12 +1148,13 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let offer = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .amount_msats(1000)
                        .build().unwrap();
                let invoice_request = offer
-                       .request_invoice_deriving_payer_id(&expanded_key, &entropy, &secp_ctx)
+                       .request_invoice_deriving_payer_id(&expanded_key, &entropy, &secp_ctx, payment_id)
                        .unwrap()
                        .build_and_sign()
                        .unwrap();
@@ -1094,7 +1163,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                // Fails verification with altered fields
                let (
@@ -1117,7 +1189,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered payer id
                let (
@@ -1140,7 +1212,7 @@ mod tests {
                signature_tlv_stream.write(&mut encoded_invoice).unwrap();
 
                let invoice = Bolt12Invoice::try_from(encoded_invoice).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
index c62702711c604d79047334c3f79b7217d7883b52..3593b14f1a81185c92536e382877bb0079f87051 100644 (file)
@@ -22,7 +22,6 @@ pub mod merkle;
 pub mod parse;
 mod payer;
 pub mod refund;
-#[allow(unused)]
 pub(crate) mod signer;
 #[cfg(test)]
-mod test_utils;
+pub(crate) mod test_utils;
index f6aa354b9e4f3d6c9efd2aee4ac47a04c8741dc0..e0bc63e8b2b8109e0b9243a1a32ecfb0a5a3d073 100644 (file)
@@ -77,6 +77,7 @@ use core::time::Duration;
 use crate::sign::EntropySource;
 use crate::io;
 use crate::blinded_path::BlindedPath;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::OfferFeatures;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::ln::msgs::MAX_VALUE_MSAT;
@@ -169,7 +170,7 @@ impl<'a, T: secp256k1::Signing> OfferBuilder<'a, DerivedMetadata, T> {
                secp_ctx: &'a Secp256k1<T>
        ) -> Self where ES::Target: EntropySource {
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, None);
                let metadata = Metadata::DerivedSigningPubkey(derivation_material);
                OfferBuilder {
                        offer: OfferContents {
@@ -283,7 +284,7 @@ impl<'a, M: MetadataStrategy, T: secp256k1::Signing> OfferBuilder<'a, M, T> {
                                let mut tlv_stream = self.offer.as_tlv_stream();
                                debug_assert_eq!(tlv_stream.metadata, None);
                                tlv_stream.metadata = None;
-                               if metadata.derives_keys() {
+                               if metadata.derives_recipient_keys() {
                                        tlv_stream.node_id = None;
                                }
 
@@ -454,10 +455,12 @@ impl Offer {
 
        /// Similar to [`Offer::request_invoice`] except it:
        /// - derives the [`InvoiceRequest::payer_id`] such that a different key can be used for each
-       ///   request, and
-       /// - sets the [`InvoiceRequest::payer_metadata`] when [`InvoiceRequestBuilder::build`] is
-       ///   called such that it can be used by [`Bolt12Invoice::verify`] to determine if the invoice
-       ///   was requested using a base [`ExpandedKey`] from which the payer id was derived.
+       ///   request,
+       /// - sets [`InvoiceRequest::payer_metadata`] when [`InvoiceRequestBuilder::build`] is called
+       ///   such that it can be used by [`Bolt12Invoice::verify`] to determine if the invoice was
+       ///   requested using a base [`ExpandedKey`] from which the payer id was derived, and
+       /// - includes the [`PaymentId`] encrypted in [`InvoiceRequest::payer_metadata`] so that it can
+       ///   be used when sending the payment for the requested invoice.
        ///
        /// Useful to protect the sender's privacy.
        ///
@@ -468,7 +471,8 @@ impl Offer {
        /// [`Bolt12Invoice::verify`]: crate::offers::invoice::Bolt12Invoice::verify
        /// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
        pub fn request_invoice_deriving_payer_id<'a, 'b, ES: Deref, T: secp256k1::Signing>(
-               &'a self, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>
+               &'a self, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>,
+               payment_id: PaymentId
        ) -> Result<InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T>, Bolt12SemanticError>
        where
                ES::Target: EntropySource,
@@ -477,7 +481,9 @@ impl Offer {
                        return Err(Bolt12SemanticError::UnknownRequiredFeatures);
                }
 
-               Ok(InvoiceRequestBuilder::deriving_payer_id(self, expanded_key, entropy_source, secp_ctx))
+               Ok(InvoiceRequestBuilder::deriving_payer_id(
+                       self, expanded_key, entropy_source, secp_ctx, payment_id
+               ))
        }
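
// Illustrative sketch, not part of the diff: requesting an invoice with a derived payer id
// while threading through the new `PaymentId`. The helper name `request_invoice_for_offer`
// and its parameters are made up; it mirrors the invoice_request test earlier in this diff
// that calls `request_invoice_deriving_payer_id(...).build_and_sign()`.
use bitcoin::secp256k1::{self, Secp256k1};
use lightning::ln::channelmanager::PaymentId;
use lightning::ln::inbound_payment::ExpandedKey;
use lightning::offers::invoice_request::InvoiceRequest;
use lightning::offers::offer::Offer;
use lightning::offers::parse::Bolt12SemanticError;
use lightning::sign::EntropySource;

fn request_invoice_for_offer<ES: EntropySource, T: secp256k1::Signing>(
    offer: &Offer, expanded_key: &ExpandedKey, entropy: &ES, secp_ctx: &Secp256k1<T>,
    payment_id: PaymentId,
) -> Result<InvoiceRequest, Bolt12SemanticError> {
    // The payment id is encrypted into `payer_metadata` when the builder is built, letting
    // `Bolt12Invoice::verify` later recover it and tie the invoice to this pending payment.
    offer
        .request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
        .build_and_sign()
}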
 
        /// Similar to [`Offer::request_invoice_deriving_payer_id`] except uses `payer_id` for the
@@ -489,7 +495,8 @@ impl Offer {
        ///
        /// [`InvoiceRequest::payer_id`]: crate::offers::invoice_request::InvoiceRequest::payer_id
        pub fn request_invoice_deriving_metadata<ES: Deref>(
-               &self, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES
+               &self, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
+               payment_id: PaymentId
        ) -> Result<InvoiceRequestBuilder<ExplicitPayerId, secp256k1::SignOnly>, Bolt12SemanticError>
        where
                ES::Target: EntropySource,
@@ -498,7 +505,9 @@ impl Offer {
                        return Err(Bolt12SemanticError::UnknownRequiredFeatures);
                }
 
-               Ok(InvoiceRequestBuilder::deriving_metadata(self, payer_id, expanded_key, entropy_source))
+               Ok(InvoiceRequestBuilder::deriving_metadata(
+                       self, payer_id, expanded_key, entropy_source, payment_id
+               ))
        }
 
        /// Creates an [`InvoiceRequestBuilder`] for the offer with the given `metadata` and `payer_id`,
@@ -661,11 +670,13 @@ impl OfferContents {
                                let tlv_stream = TlvStream::new(bytes).range(OFFER_TYPES).filter(|record| {
                                        match record.r#type {
                                                OFFER_METADATA_TYPE => false,
-                                               OFFER_NODE_ID_TYPE => !self.metadata.as_ref().unwrap().derives_keys(),
+                                               OFFER_NODE_ID_TYPE => {
+                                                       !self.metadata.as_ref().unwrap().derives_recipient_keys()
+                                               },
                                                _ => true,
                                        }
                                });
-                               signer::verify_metadata(
+                               signer::verify_recipient_metadata(
                                        metadata, key, IV_BYTES, self.signing_pubkey(), tlv_stream, secp_ctx
                                )
                        },
index d419e8fe0d2b41e06c8b44f5f6215d8d07a221a9..4b4572b4df9c85fd8970e9758b727d9b6214a067 100644 (file)
@@ -82,6 +82,7 @@ use crate::sign::EntropySource;
 use crate::io;
 use crate::blinded_path::BlindedPath;
 use crate::ln::PaymentHash;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::features::InvoiceRequestFeatures;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -147,18 +148,22 @@ impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
        /// Also, sets the metadata when [`RefundBuilder::build`] is called such that it can be used to
        /// verify that an [`InvoiceRequest`] was produced for the refund given an [`ExpandedKey`].
        ///
+       /// The `payment_id` is encrypted in the metadata and should be unique. This ensures that only
+       /// one invoice will be paid for the refund and that payments can be uniquely identified.
+       ///
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        /// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
        pub fn deriving_payer_id<ES: Deref>(
                description: String, node_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
-               secp_ctx: &'a Secp256k1<T>, amount_msats: u64
+               secp_ctx: &'a Secp256k1<T>, amount_msats: u64, payment_id: PaymentId
        ) -> Result<Self, Bolt12SemanticError> where ES::Target: EntropySource {
                if amount_msats > MAX_VALUE_MSAT {
                        return Err(Bolt12SemanticError::InvalidAmount);
                }
 
                let nonce = Nonce::from_entropy_source(entropy_source);
-               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES);
+               let payment_id = Some(payment_id);
+               let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, payment_id);
                let metadata = Metadata::DerivedSigningPubkey(derivation_material);
                Ok(Self {
                        refund: RefundContents {
@@ -244,7 +249,7 @@ impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
 
                        let mut tlv_stream = self.refund.as_tlv_stream();
                        tlv_stream.0.metadata = None;
-                       if metadata.derives_keys() {
+                       if metadata.derives_payer_keys() {
                                tlv_stream.2.payer_id = None;
                        }
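
// Illustrative sketch, not part of the diff: building a refund whose payer metadata now
// carries an encrypted `PaymentId`, matching the extended `deriving_payer_id` signature
// above. `build_refund` and its parameters are made up for this example.
use bitcoin::secp256k1::{self, PublicKey, Secp256k1};
use lightning::ln::channelmanager::PaymentId;
use lightning::ln::inbound_payment::ExpandedKey;
use lightning::offers::parse::Bolt12SemanticError;
use lightning::offers::refund::{Refund, RefundBuilder};
use lightning::sign::EntropySource;

fn build_refund<ES: EntropySource, T: secp256k1::Signing>(
    description: String, node_id: PublicKey, expanded_key: &ExpandedKey, entropy: &ES,
    secp_ctx: &Secp256k1<T>, amount_msats: u64, payment_id: PaymentId,
) -> Result<Refund, Bolt12SemanticError> {
    // The unique payment id ensures only one invoice will be paid for this refund and that
    // the eventual payment can be identified when the invoice is verified.
    RefundBuilder::deriving_payer_id(
        description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id,
    )?
    .build()
}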
 
@@ -566,7 +571,7 @@ impl RefundContents {
        }
 
        pub(super) fn derives_keys(&self) -> bool {
-               self.payer.0.derives_keys()
+               self.payer.0.derives_payer_keys()
        }
 
        pub(super) fn as_tlv_stream(&self) -> RefundTlvStreamRef {
@@ -748,6 +753,7 @@ mod tests {
        use core::time::Duration;
        use crate::blinded_path::{BlindedHop, BlindedPath};
        use crate::sign::KeyMaterial;
+       use crate::ln::channelmanager::PaymentId;
        use crate::ln::features::{InvoiceRequestFeatures, OfferFeatures};
        use crate::ln::inbound_payment::ExpandedKey;
        use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
@@ -841,9 +847,10 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let refund = RefundBuilder
-                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000)
+                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000, payment_id)
                        .unwrap()
                        .build().unwrap();
                assert_eq!(refund.payer_id(), node_id);
@@ -854,7 +861,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                let mut tlv_stream = refund.as_tlv_stream();
                tlv_stream.2.amount = Some(2000);
@@ -867,7 +877,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered metadata
                let mut tlv_stream = refund.as_tlv_stream();
@@ -882,7 +892,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
@@ -892,6 +902,7 @@ mod tests {
                let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
                let entropy = FixedEntropy {};
                let secp_ctx = Secp256k1::new();
+               let payment_id = PaymentId([1; 32]);
 
                let blinded_path = BlindedPath {
                        introduction_node_id: pubkey(40),
@@ -903,7 +914,7 @@ mod tests {
                };
 
                let refund = RefundBuilder
-                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000)
+                       ::deriving_payer_id(desc, node_id, &expanded_key, &entropy, &secp_ctx, 1000, payment_id)
                        .unwrap()
                        .path(blinded_path)
                        .build().unwrap();
@@ -914,7 +925,10 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(invoice.verify(&expanded_key, &secp_ctx));
+               match invoice.verify(&expanded_key, &secp_ctx) {
+                       Ok(payment_id) => assert_eq!(payment_id, PaymentId([1; 32])),
+                       Err(()) => panic!("verification failed"),
+               }
 
                // Fails verification with altered fields
                let mut tlv_stream = refund.as_tlv_stream();
@@ -928,7 +942,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
 
                // Fails verification with altered payer_id
                let mut tlv_stream = refund.as_tlv_stream();
@@ -943,7 +957,7 @@ mod tests {
                        .unwrap()
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
-               assert!(!invoice.verify(&expanded_key, &secp_ctx));
+               assert!(invoice.verify(&expanded_key, &secp_ctx).is_err());
        }
 
        #[test]
index 8d5f98e6f6b050993474bbedbcc9a0f25c409980..4d5d4662bd62b806cb78543e41653c266a02146a 100644 (file)
@@ -16,15 +16,26 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey, self};
 use core::convert::TryFrom;
 use core::fmt;
+use crate::ln::channelmanager::PaymentId;
 use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
 use crate::offers::merkle::TlvRecord;
 use crate::util::ser::Writeable;
 
 use crate::prelude::*;
 
+// Use a different HMAC input for each derivation. Otherwise, an attacker could:
+// - take an Offer that has metadata consisting of a nonce and HMAC
+// - strip off the HMAC and replace the signing_pubkey with one whose private key is the HMAC,
+// - generate and sign an invoice using the new signing_pubkey, and
+// - claim they paid it since they would know the preimage of the invoice's payment_hash
 const DERIVED_METADATA_HMAC_INPUT: &[u8; 16] = &[1; 16];
 const DERIVED_METADATA_AND_KEYS_HMAC_INPUT: &[u8; 16] = &[2; 16];
 
+// Additional HMAC inputs to distinguish use cases, either Offer or Refund/InvoiceRequest, where
+// the metadata for the latter contains an encrypted PaymentId.
+const WITHOUT_ENCRYPTED_PAYMENT_ID_HMAC_INPUT: &[u8; 16] = &[3; 16];
+const WITH_ENCRYPTED_PAYMENT_ID_HMAC_INPUT: &[u8; 16] = &[4; 16];
+
 /// Message metadata which possibly is derived from [`MetadataMaterial`] such that it can be
 /// verified.
 #[derive(Clone)]
@@ -56,7 +67,20 @@ impl Metadata {
                }
        }
 
-       pub fn derives_keys(&self) -> bool {
+       pub fn derives_payer_keys(&self) -> bool {
+               match self {
+                       // Infer whether Metadata::derived_from was called on Metadata::DerivedSigningPubkey to
+                       // produce Metadata::Bytes. This is merely to determine which fields should be included
+                       // when verifying a message. It doesn't necessarily indicate that keys were in fact
+                       // derived, as wouldn't be the case if a Metadata::Bytes with length PaymentId::LENGTH +
+                       // Nonce::LENGTH had been set explicitly.
+                       Metadata::Bytes(bytes) => bytes.len() == PaymentId::LENGTH + Nonce::LENGTH,
+                       Metadata::Derived(_) => false,
+                       Metadata::DerivedSigningPubkey(_) => true,
+               }
+       }
+
+       pub fn derives_recipient_keys(&self) -> bool {
                match self {
                        // Infer whether Metadata::derived_from was called on Metadata::DerivedSigningPubkey to
                        // produce Metadata::Bytes. This is merely to determine which fields should be included
@@ -132,20 +156,33 @@ impl PartialEq for Metadata {
 pub(super) struct MetadataMaterial {
        nonce: Nonce,
        hmac: HmacEngine<Sha256>,
+       // Some for payer metadata and None for offer metadata
+       encrypted_payment_id: Option<[u8; PaymentId::LENGTH]>,
 }
 
 impl MetadataMaterial {
-       pub fn new(nonce: Nonce, expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN]) -> Self {
+       pub fn new(
+               nonce: Nonce, expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
+               payment_id: Option<PaymentId>
+       ) -> Self {
+               // Encrypt payment_id
+               let encrypted_payment_id = payment_id.map(|payment_id| {
+                       expanded_key.crypt_for_offer(payment_id.0, nonce)
+               });
+
                Self {
                        nonce,
                        hmac: expanded_key.hmac_for_offer(nonce, iv_bytes),
+                       encrypted_payment_id,
                }
        }
 
        fn derive_metadata(mut self) -> Vec<u8> {
                self.hmac.input(DERIVED_METADATA_HMAC_INPUT);
+               self.maybe_include_encrypted_payment_id();
 
-               let mut bytes = self.nonce.as_slice().to_vec();
+               let mut bytes = self.encrypted_payment_id.map(|id| id.to_vec()).unwrap_or(vec![]);
+               bytes.extend_from_slice(self.nonce.as_slice());
                bytes.extend_from_slice(&Hmac::from_engine(self.hmac).into_inner());
                bytes
        }
@@ -154,11 +191,26 @@ impl MetadataMaterial {
                mut self, secp_ctx: &Secp256k1<T>
        ) -> (Vec<u8>, KeyPair) {
                self.hmac.input(DERIVED_METADATA_AND_KEYS_HMAC_INPUT);
+               self.maybe_include_encrypted_payment_id();
+
+               let mut bytes = self.encrypted_payment_id.map(|id| id.to_vec()).unwrap_or(vec![]);
+               bytes.extend_from_slice(self.nonce.as_slice());
 
                let hmac = Hmac::from_engine(self.hmac);
                let privkey = SecretKey::from_slice(hmac.as_inner()).unwrap();
                let keys = KeyPair::from_secret_key(secp_ctx, &privkey);
-               (self.nonce.as_slice().to_vec(), keys)
+
+               (bytes, keys)
+       }
+
+       fn maybe_include_encrypted_payment_id(&mut self) {
+               match self.encrypted_payment_id {
+                       None => self.hmac.input(WITHOUT_ENCRYPTED_PAYMENT_ID_HMAC_INPUT),
+                       Some(encrypted_payment_id) => {
+                               self.hmac.input(WITH_ENCRYPTED_PAYMENT_ID_HMAC_INPUT);
+                               self.hmac.input(&encrypted_payment_id)
+                       },
+               }
        }
 }
 
@@ -170,19 +222,65 @@ pub(super) fn derive_keys(nonce: Nonce, expanded_key: &ExpandedKey) -> KeyPair {
        KeyPair::from_secret_key(&secp_ctx, &privkey)
 }
 
+/// Verifies data given in a TLV stream was used to produce the given metadata, consisting of:
+/// - a 256-bit [`PaymentId`],
+/// - a 128-bit [`Nonce`], and possibly
+/// - a [`Sha256`] hash of the nonce and the TLV records using the [`ExpandedKey`].
+///
+/// If the latter is not included in the metadata, the TLV stream is used to check if the given
+/// `signing_pubkey` can be derived from it.
+///
+/// Returns the [`PaymentId`] that should be used for sending the payment.
+pub(super) fn verify_payer_metadata<'a, T: secp256k1::Signing>(
+       metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
+       signing_pubkey: PublicKey, tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>,
+       secp_ctx: &Secp256k1<T>
+) -> Result<PaymentId, ()> {
+       if metadata.len() < PaymentId::LENGTH {
+               return Err(());
+       }
+
+       let mut encrypted_payment_id = [0u8; PaymentId::LENGTH];
+       encrypted_payment_id.copy_from_slice(&metadata[..PaymentId::LENGTH]);
+
+       let mut hmac = hmac_for_message(
+               &metadata[PaymentId::LENGTH..], expanded_key, iv_bytes, tlv_stream
+       )?;
+       hmac.input(WITH_ENCRYPTED_PAYMENT_ID_HMAC_INPUT);
+       hmac.input(&encrypted_payment_id);
+
+       verify_metadata(
+               &metadata[PaymentId::LENGTH..], Hmac::from_engine(hmac), signing_pubkey, secp_ctx
+       )?;
+
+       let nonce = Nonce::try_from(&metadata[PaymentId::LENGTH..][..Nonce::LENGTH]).unwrap();
+       let payment_id = expanded_key.crypt_for_offer(encrypted_payment_id, nonce);
+
+       Ok(PaymentId(payment_id))
+}
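
// Illustrative sketch, not part of the diff: the byte layout `verify_payer_metadata` expects,
// split into its components. Lengths follow the doc comment above (256-bit PaymentId, 128-bit
// Nonce); the constants and the helper are written out here only for illustration.
fn split_payer_metadata(metadata: &[u8]) -> Option<(&[u8], &[u8], &[u8])> {
    const PAYMENT_ID_LEN: usize = 32; // PaymentId::LENGTH
    const NONCE_LEN: usize = 16; // Nonce::LENGTH
    if metadata.len() < PAYMENT_ID_LEN + NONCE_LEN {
        return None;
    }
    let (encrypted_payment_id, rest) = metadata.split_at(PAYMENT_ID_LEN);
    let (nonce_bytes, hmac_or_empty) = rest.split_at(NONCE_LEN);
    // `hmac_or_empty` holds the trailing HMAC when the payer id was set explicitly; it is
    // empty when the payer id was derived, in which case the recomputed HMAC itself acts as
    // the signing key that must match `signing_pubkey`.
    Some((encrypted_payment_id, nonce_bytes, hmac_or_empty))
}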
+
 /// Verifies data given in a TLV stream was used to produce the given metadata, consisting of:
 /// - a 128-bit [`Nonce`] and possibly
 /// - a [`Sha256`] hash of the nonce and the TLV records using the [`ExpandedKey`].
 ///
 /// If the latter is not included in the metadata, the TLV stream is used to check if the given
 /// `signing_pubkey` can be derived from it.
-pub(super) fn verify_metadata<'a, T: secp256k1::Signing>(
+///
+/// Returns the [`KeyPair`] for signing the invoice, if it can be derived from the metadata.
+pub(super) fn verify_recipient_metadata<'a, T: secp256k1::Signing>(
        metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
        signing_pubkey: PublicKey, tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>,
        secp_ctx: &Secp256k1<T>
 ) -> Result<Option<KeyPair>, ()> {
-       let hmac = hmac_for_message(metadata, expanded_key, iv_bytes, tlv_stream)?;
+       let mut hmac = hmac_for_message(metadata, expanded_key, iv_bytes, tlv_stream)?;
+       hmac.input(WITHOUT_ENCRYPTED_PAYMENT_ID_HMAC_INPUT);
+
+       verify_metadata(metadata, Hmac::from_engine(hmac), signing_pubkey, secp_ctx)
+}
 
+fn verify_metadata<T: secp256k1::Signing>(
+       metadata: &[u8], hmac: Hmac<Sha256>, signing_pubkey: PublicKey, secp_ctx: &Secp256k1<T>
+) -> Result<Option<KeyPair>, ()> {
        if metadata.len() == Nonce::LENGTH {
                let derived_keys = KeyPair::from_secret_key(
                        secp_ctx, &SecretKey::from_slice(hmac.as_inner()).unwrap()
@@ -206,7 +304,7 @@ pub(super) fn verify_metadata<'a, T: secp256k1::Signing>(
 fn hmac_for_message<'a>(
        metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN],
        tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>
-) -> Result<Hmac<Sha256>, ()> {
+) -> Result<HmacEngine<Sha256>, ()> {
        if metadata.len() < Nonce::LENGTH {
                return Err(());
        }
@@ -227,5 +325,5 @@ fn hmac_for_message<'a>(
                hmac.input(DERIVED_METADATA_HMAC_INPUT);
        }
 
-       Ok(Hmac::from_engine(hmac))
+       Ok(hmac)
 }
index f1b3c79edc0ec15cd5f68a406c5f6a8e7f7c3f91..39122472eacb5513640f7fd78020a10ea783957d 100644 (file)
@@ -20,33 +20,33 @@ use crate::ln::features::BlindedHopFeatures;
 use crate::offers::invoice::BlindedPayInfo;
 use crate::offers::merkle::TaggedHash;
 
-pub(super) fn payer_keys() -> KeyPair {
+pub(crate) fn payer_keys() -> KeyPair {
        let secp_ctx = Secp256k1::new();
        KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap())
 }
 
-pub(super) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
        let secp_ctx = Secp256k1::new();
        let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
        Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
 }
 
-pub(super) fn payer_pubkey() -> PublicKey {
+pub(crate) fn payer_pubkey() -> PublicKey {
        payer_keys().public_key()
 }
 
-pub(super) fn recipient_keys() -> KeyPair {
+pub(crate) fn recipient_keys() -> KeyPair {
        let secp_ctx = Secp256k1::new();
        KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap())
 }
 
-pub(super) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
        let secp_ctx = Secp256k1::new();
        let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
        Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
 }
 
-pub(super) fn recipient_pubkey() -> PublicKey {
+pub(crate) fn recipient_pubkey() -> PublicKey {
        recipient_keys().public_key()
 }
 
@@ -59,7 +59,7 @@ pub(super) fn privkey(byte: u8) -> SecretKey {
        SecretKey::from_slice(&[byte; 32]).unwrap()
 }
 
-pub(super) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
+pub(crate) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
        let paths = vec![
                BlindedPath {
                        introduction_node_id: pubkey(40),
@@ -101,17 +101,17 @@ pub(super) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
        payinfo.into_iter().zip(paths.into_iter()).collect()
 }
 
-pub(super) fn payment_hash() -> PaymentHash {
+pub(crate) fn payment_hash() -> PaymentHash {
        PaymentHash([42; 32])
 }
 
-pub(super) fn now() -> Duration {
+pub(crate) fn now() -> Duration {
        std::time::SystemTime::now()
                .duration_since(std::time::SystemTime::UNIX_EPOCH)
                .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH")
 }
 
-pub(super) struct FixedEntropy;
+pub(crate) struct FixedEntropy;
 
 impl EntropySource for FixedEntropy {
        fn get_secure_random_bytes(&self) -> [u8; 32] {
index 64f3585b95332d464b7ec593c64d88152369a36f..ef776a44dc11c484526f7f5baeb64db06498710b 100644 (file)
@@ -25,7 +25,7 @@ use bitcoin::blockdata::constants::genesis_block;
 use crate::events::{MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::ChannelId;
 use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
-use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
+use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT};
 use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
 use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use crate::ln::msgs;
@@ -1128,7 +1128,7 @@ pub struct NodeAnnouncementInfo {
 
 impl NodeAnnouncementInfo {
        /// Internet-level addresses via which one can connect to the node
-       pub fn addresses(&self) -> &[NetAddress] {
+       pub fn addresses(&self) -> &[SocketAddress] {
                self.announcement_message.as_ref()
                        .map(|msg| msg.contents.addresses.as_slice())
                        .unwrap_or_default()
@@ -1137,7 +1137,7 @@ impl NodeAnnouncementInfo {
 
 impl Writeable for NodeAnnouncementInfo {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               let empty_addresses = Vec::<NetAddress>::new();
+               let empty_addresses = Vec::<SocketAddress>::new();
                write_tlv_fields!(writer, {
                        (0, self.features, required),
                        (2, self.last_update, required),
@@ -1160,7 +1160,7 @@ impl Readable for NodeAnnouncementInfo {
                        (8, announcement_message, option),
                        (10, _addresses, optional_vec), // deprecated, not used anymore
                });
-               let _: Option<Vec<NetAddress>> = _addresses;
+               let _: Option<Vec<SocketAddress>> = _addresses;
                Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(),
                        alias: alias.0.unwrap(), announcement_message })
        }
@@ -1236,7 +1236,7 @@ impl Writeable for NodeInfo {
 }
 
 // A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
-// necessary to maintain compatibility with previous serializations of `NetAddress` that have an
+// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an
 // invalid hostname set. We ignore and eat all errors until we are either able to read a
 // `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
 struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
@@ -2039,7 +2039,7 @@ impl ReadOnlyNetworkGraph<'_> {
        /// Get network addresses by node id.
        /// Returns None if the requested node is completely unknown,
        /// or if node announcement for the node was never received.
-       pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<NetAddress>> {
+       pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<SocketAddress>> {
                self.nodes.get(&NodeId::from_pubkey(&pubkey))
                        .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
        }
index 79d54e22d6755896ee1c4ead6be0804413178a47..9c5fe8e1f9bd3dab59033644591a4a59b49d173b 100644 (file)
@@ -337,23 +337,34 @@ pub struct Route {
        /// [`BlindedTail`]s are present, then the pubkey of the last [`RouteHop`] in each path must be
        /// the same.
        pub paths: Vec<Path>,
-       /// The `payment_params` parameter passed via [`RouteParameters`] to [`find_route`].
+       /// The `route_params` parameter passed to [`find_route`].
        ///
        /// This is used by `ChannelManager` to track information which may be required for retries.
-       pub payment_params: Option<PaymentParameters>,
+       ///
+       /// Will be `None` for objects serialized with LDK versions prior to 0.0.117.
+       pub route_params: Option<RouteParameters>,
 }
 
 impl Route {
        /// Returns the total amount of fees paid on this [`Route`].
        ///
-       /// This doesn't include any extra payment made to the recipient, which can happen in excess of
-       /// the amount passed to [`find_route`]'s `route_params.final_value_msat`.
+       /// For objects serialized with LDK 0.0.117 and after, this includes any extra payment made to
+       /// the recipient beyond the amount passed to [`find_route`] via [`RouteParameters::final_value_msat`],
+       /// which can happen if we had to reach the [`htlc_minimum_msat`] limits.
+       ///
+       /// [`htlc_minimum_msat`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#the-channel_update-message
        pub fn get_total_fees(&self) -> u64 {
-               self.paths.iter().map(|path| path.fee_msat()).sum()
+               let overpaid_value_msat = self.route_params.as_ref()
+                       .map_or(0, |p| self.get_total_amount().saturating_sub(p.final_value_msat));
+               overpaid_value_msat + self.paths.iter().map(|path| path.fee_msat()).sum::<u64>()
        }
 
-       /// Returns the total amount paid on this [`Route`], excluding the fees. Might be more than
-       /// requested if we had to reach htlc_minimum_msat.
+       /// Returns the total amount paid on this [`Route`], excluding the fees.
+       ///
+       /// Might be more than requested as part of the given [`RouteParameters::final_value_msat`] if
+       /// we had to reach the [`htlc_minimum_msat`] limits.
+       ///
+       /// [`htlc_minimum_msat`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#the-channel_update-message
        pub fn get_total_amount(&self) -> u64 {
                self.paths.iter().map(|path| path.final_value_msat()).sum()
        }
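
// Illustrative numbers, not part of the diff: suppose `route_params.final_value_msat` was
// 100_000, the paths deliver 105_000 msat in total to the recipient (an `htlc_minimum_msat`
// forced an overpayment), and the hops charge 1_000 msat of fees in total. Then:
//
//   get_total_amount() == 105_000                        // sum of each path's final value
//   get_total_fees()   == (105_000 - 100_000) + 1_000    // overpaid value + hop fees
//                      == 6_000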
@@ -383,8 +394,11 @@ impl Writeable for Route {
                        }
                }
                write_tlv_fields!(writer, {
-                       (1, self.payment_params, option),
+                       // For compatibility with LDK versions prior to 0.0.117, we take the individual
+                       // RouteParameters' fields and reconstruct them on read.
+                       (1, self.route_params.as_ref().map(|p| &p.payment_params), option),
                        (2, blinded_tails, optional_vec),
+                       (3, self.route_params.as_ref().map(|p| p.final_value_msat), option),
                });
                Ok(())
        }
@@ -411,6 +425,7 @@ impl Readable for Route {
                _init_and_read_len_prefixed_tlv_fields!(reader, {
                        (1, payment_params, (option: ReadableArgs, min_final_cltv_expiry_delta)),
                        (2, blinded_tails, optional_vec),
+                       (3, final_value_msat, option),
                });
                let blinded_tails = blinded_tails.unwrap_or(Vec::new());
                if blinded_tails.len() != 0 {
@@ -419,14 +434,23 @@ impl Readable for Route {
                                path.blinded_tail = blinded_tail_opt;
                        }
                }
-               Ok(Route { paths, payment_params })
+
+               // If we previously wrote the corresponding fields, reconstruct RouteParameters.
+               let route_params = match (payment_params, final_value_msat) {
+                       (Some(payment_params), Some(final_value_msat)) => {
+                               Some(RouteParameters { payment_params, final_value_msat })
+                       }
+                       _ => None,
+               };
+
+               Ok(Route { paths, route_params })
        }
 }
 
 /// Parameters needed to find a [`Route`].
 ///
 /// Passed to [`find_route`] and [`build_route_from_hops`].
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub struct RouteParameters {
        /// The parameters of the failed payment path.
        pub payment_params: PaymentParameters,
@@ -435,6 +459,13 @@ pub struct RouteParameters {
        pub final_value_msat: u64,
 }
 
+impl RouteParameters {
+       /// Constructs [`RouteParameters`] from the given [`PaymentParameters`] and a payment amount.
+       pub fn from_payment_params_and_value(payment_params: PaymentParameters, final_value_msat: u64) -> Self {
+               Self { payment_params, final_value_msat }
+       }
+}
+
 impl Writeable for RouteParameters {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_tlv_fields!(writer, {
@@ -1400,19 +1431,21 @@ pub fn find_route<L: Deref, GL: Deref, S: ScoreLookUp>(
 ) -> Result<Route, LightningError>
 where L::Target: Logger, GL::Target: Logger {
        let graph_lock = network_graph.read_only();
-       let mut route = get_route(our_node_pubkey, &route_params.payment_params, &graph_lock, first_hops,
-               route_params.final_value_msat, logger, scorer, score_params,
-               random_seed_bytes)?;
+       let mut route = get_route(our_node_pubkey, &route_params, &graph_lock, first_hops, logger,
+               scorer, score_params, random_seed_bytes)?;
        add_random_cltv_offset(&mut route, &route_params.payment_params, &graph_lock, random_seed_bytes);
        Ok(route)
 }
 
 pub(crate) fn get_route<L: Deref, S: ScoreLookUp>(
-       our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
-       first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, logger: L, scorer: &S, score_params: &S::ScoreParams,
+       our_node_pubkey: &PublicKey, route_params: &RouteParameters, network_graph: &ReadOnlyNetworkGraph,
+       first_hops: Option<&[&ChannelDetails]>, logger: L, scorer: &S, score_params: &S::ScoreParams,
        _random_seed_bytes: &[u8; 32]
 ) -> Result<Route, LightningError>
 where L::Target: Logger {
+
+       let payment_params = &route_params.payment_params;
+       let final_value_msat = route_params.final_value_msat;
        // If we're routing to a blinded recipient, we won't have their node id. Therefore, keep the
        // unblinded payee id as an option. We also need a non-optional "payee id" for path construction,
        // so use a dummy id for this in the blinded case.
@@ -2480,7 +2513,7 @@ where L::Target: Logger {
                }
        }
 
-       let route = Route { paths, payment_params: Some(payment_params.clone()) };
+       let route = Route { paths, route_params: Some(route_params.clone()) };
        log_info!(logger, "Got route: {}", log_route!(route));
        Ok(route)
 }
@@ -2585,17 +2618,15 @@ pub fn build_route_from_hops<L: Deref, GL: Deref>(
 ) -> Result<Route, LightningError>
 where L::Target: Logger, GL::Target: Logger {
        let graph_lock = network_graph.read_only();
-       let mut route = build_route_from_hops_internal(
-               our_node_pubkey, hops, &route_params.payment_params, &graph_lock,
-               route_params.final_value_msat, logger, random_seed_bytes)?;
+       let mut route = build_route_from_hops_internal(our_node_pubkey, hops, &route_params,
+               &graph_lock, logger, random_seed_bytes)?;
        add_random_cltv_offset(&mut route, &route_params.payment_params, &graph_lock, random_seed_bytes);
        Ok(route)
 }
 
 fn build_route_from_hops_internal<L: Deref>(
-       our_node_pubkey: &PublicKey, hops: &[PublicKey], payment_params: &PaymentParameters,
-       network_graph: &ReadOnlyNetworkGraph, final_value_msat: u64, logger: L,
-       random_seed_bytes: &[u8; 32]
+       our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters,
+       network_graph: &ReadOnlyNetworkGraph, logger: L, random_seed_bytes: &[u8; 32],
 ) -> Result<Route, LightningError> where L::Target: Logger {
 
        struct HopScorer {
@@ -2642,8 +2673,7 @@ fn build_route_from_hops_internal<L: Deref>(
 
        let scorer = HopScorer { our_node_id, hop_ids };
 
-       get_route(our_node_pubkey, payment_params, network_graph, None, final_value_msat,
-               logger, &scorer, &(), random_seed_bytes)
+       get_route(our_node_pubkey, route_params, network_graph, None, logger, &scorer, &(), random_seed_bytes)
 }
 
 #[cfg(test)]
@@ -2653,7 +2683,7 @@ mod tests {
        use crate::routing::utxo::UtxoResult;
        use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
                BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
-               DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
+               DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE, RouteParameters};
        use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, ScoreLookUp, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
        use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel};
        use crate::chain::transaction::OutPoint;
@@ -2736,11 +2766,17 @@ mod tests {
 
                // Simple route to 2 via 1
 
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 0, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Cannot send a payment of 0 msat");
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 0);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Cannot send a payment of 0 msat");
                } else { panic!(); }
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -2771,12 +2807,15 @@ mod tests {
 
                let our_chans = vec![get_channel_details(Some(2), our_id, InitFeatures::from_le_bytes(vec![0b11]), 100000)];
 
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) =
-                       get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()),
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
+                               assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
                } else { panic!(); }
-
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
        }
 
@@ -2884,8 +2923,12 @@ mod tests {
                });
 
                // Not possible to send 199_999_999, because the minimum on channel=2 is 200_000_000.
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 199_999_999);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
 
                // Lift the restriction on the first hop.
@@ -2903,7 +2946,8 @@ mod tests {
                });
 
                // A payment above the minimum should pass
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
        }
 
@@ -2985,7 +3029,10 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 60_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                // Overpay fees to hit htlc_minimum_msat.
                let overpaid_fees = route.paths[0].hops[0].fee_msat + route.paths[1].hops[0].fee_msat;
                // TODO: this could be better balanced to overpay 10k and not 15k.
@@ -3030,14 +3077,17 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                // Fine to overpay for htlc_minimum_msat if it allows us to save fee.
                assert_eq!(route.paths.len(), 1);
                assert_eq!(route.paths[0].hops[0].short_channel_id, 12);
                let fees = route.paths[0].hops[0].fee_msat;
                assert_eq!(fees, 5_000);
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 50_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                // Not fine to overpay for htlc_minimum_msat if it requires paying more than fee on
                // the other channel.
                assert_eq!(route.paths.len(), 1);
@@ -3082,13 +3132,19 @@ mod tests {
                });
 
                // If all the channels require some features we don't understand, route should fail
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
 
                // If we specify a channel to node7, that overrides our local channel view and that gets used
-               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
+                       InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
@@ -3123,13 +3179,19 @@ mod tests {
                add_or_update_node(&gossip_sync, &secp_ctx, &privkeys[7], unknown_features.clone(), 1);
 
                // If all nodes require some features we don't understand, route should fail
-               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                       &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
 
                // If we specify a channel to node7, that overrides our local channel view and that gets used
-               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
+                       InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
@@ -3161,7 +3223,9 @@ mod tests {
 
                // Route to 1 via 2 and 3 because our channel to 1 is disabled
                let payment_params = PaymentParameters::from_node_id(nodes[0], 42);
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 3);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3187,8 +3251,12 @@ mod tests {
 
                // If we specify a channel to node7, that overrides our local channel view and that gets used
                let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
-               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
+                       InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
@@ -3310,14 +3378,21 @@ mod tests {
                let mut invalid_last_hops = last_hops_multi_private_channels(&nodes);
                invalid_last_hops.push(invalid_last_hop);
                {
-                       let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(invalid_last_hops).unwrap();
-                       if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Route hint cannot have the payee as the source.");
+                       let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                               .with_route_hints(invalid_last_hops).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+                       if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
+                               &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
+                               &random_seed_bytes) {
+                                       assert_eq!(err, "Route hint cannot have the payee as the source.");
                        } else { panic!(); }
                }
 
-               let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops_multi_private_channels(&nodes)).unwrap();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                       .with_route_hints(last_hops_multi_private_channels(&nodes)).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3392,8 +3467,9 @@ mod tests {
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
                // Test handling of an empty RouteHint passed in Invoice.
-
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3499,7 +3575,9 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3571,7 +3649,9 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &[42u8; 32]).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &[42u8; 32]).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3653,7 +3733,9 @@ mod tests {
                // This test shows that public routes can be present in the invoice
                // which would be handled in the same manner.
 
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3705,8 +3787,12 @@ mod tests {
                // Simple test with outbound channel to 4 to test that last_hops and first_hops connect
                let our_chans = vec![get_channel_details(Some(42), nodes[3].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
                let mut last_hops = last_hops(&nodes);
-               let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops.clone()).unwrap();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                       .with_route_hints(last_hops.clone()).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[3]);
@@ -3726,8 +3812,12 @@ mod tests {
                last_hops[0].0[0].fees.base_msat = 1000;
 
                // Revert to via 6 as the fee on 8 goes up
-               let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops).unwrap();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[6], 42)
+                       .with_route_hints(last_hops).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3761,7 +3851,9 @@ mod tests {
                assert_eq!(route.paths[0].hops[3].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
 
                // ...but still use 8 for larger payments as 6 has a variable feerate
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 2000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 2000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
 
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
@@ -3826,8 +3918,10 @@ mod tests {
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
                let logger = ln_test_utils::TestLogger::new();
                let network_graph = NetworkGraph::new(Network::Testnet, &logger);
-               let route = get_route(&source_node_id, &payment_params, &network_graph.read_only(),
-                               Some(&our_chans.iter().collect::<Vec<_>>()), route_val, &logger, &scorer, &(), &random_seed_bytes);
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, route_val);
+               let route = get_route(&source_node_id, &route_params, &network_graph.read_only(),
+                               Some(&our_chans.iter().collect::<Vec<_>>()), &logger, &scorer, &(),
+                               &random_seed_bytes);
                route
        }
 
@@ -3948,15 +4042,21 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 250_000_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 250_000_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None,
+                                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 250_000_000, Arc::clone(&logger), &scorer, &(),&random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 250_000_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
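When a test expects routing to fail, it matches on the IgnoreError action plus the exact error string and panics on any other outcome. A sketch of that idiom, again assuming the same harness values as above:

    // Ask for more than the channels can carry and expect a routing failure.
    let route_params = RouteParameters::from_payment_params_and_value(payment_params.clone(), 250_000_001);
    if let Err(LightningError { err, action: ErrorAction::IgnoreError }) = get_route(&our_id,
        &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
        &random_seed_bytes) {
            assert_eq!(err, "Failed to find a sufficient route to the given destination");
    } else { panic!(); }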
@@ -3984,15 +4084,23 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 200_000_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(),
+                                       Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
+                                       &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 200_000_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(),
+                               Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                               &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4031,15 +4139,21 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4102,15 +4216,21 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 15_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4134,15 +4254,21 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 10_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 10_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempting to route the exact amount we have should be fine.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 10_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
@@ -4246,15 +4372,21 @@ mod tests {
                });
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 60_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 49 sats (just a bit below the capacity).
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 49_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 49_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4267,7 +4399,10 @@ mod tests {
 
                {
                        // Attempt to route an exact amount is also fine
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 50_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4315,7 +4450,10 @@ mod tests {
                });
 
                {
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 50_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4480,8 +4618,10 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 300_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                               &our_id, &payment_params, &network_graph.read_only(), None, 300_000,
+                               &our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
                                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
@@ -4490,8 +4630,10 @@ mod tests {
                {
                        // Attempt to route while setting max_path_count to 0 results in a failure.
                        let zero_payment_params = payment_params.clone().with_max_path_count(0);
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               zero_payment_params, 100);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                               &our_id, &zero_payment_params, &network_graph.read_only(), None, 100,
+                               &our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
                                        assert_eq!(err, "Can't find a route with no paths allowed.");
                        } else { panic!(); }
@@ -4502,8 +4644,10 @@ mod tests {
                        // This is the case because the minimal_value_contribution_msat would require each path
                        // to account for 1/3 of the total value, which is violated by 2 out of 3 paths.
                        let fail_payment_params = payment_params.clone().with_max_path_count(3);
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               fail_payment_params, 250_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                               &our_id, &fail_payment_params, &network_graph.read_only(), None, 250_000,
+                               &our_id, &route_params, &network_graph.read_only(), None,
                                Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
                                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
@@ -4512,8 +4656,10 @@ mod tests {
                {
                        // Now, attempt to route 250 sats (just a bit below the capacity).
                        // Our algorithm should provide us with these 3 paths.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None,
-                               250_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 250_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4530,8 +4676,10 @@ mod tests {
 
                {
                        // Attempt to route an exact amount is also fine
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None,
-                               290_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 290_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -4565,7 +4713,8 @@ mod tests {
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
                let config = UserConfig::default();
-               let payment_params = PaymentParameters::from_node_id(nodes[3], 42).with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
+               let payment_params = PaymentParameters::from_node_id(nodes[3], 42)
+                       .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
 
                // We need a route consisting of 3 paths:
                // From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}.
@@ -4700,16 +4849,22 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 350_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 350_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 300 sats (exact amount we can route).
                        // Our algorithm should provide us with these 3 paths, 100 sats each.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 300_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 300_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
 
                        let mut total_amount_paid_msat = 0;
@@ -4870,7 +5025,10 @@ mod tests {
                {
                        // Now, attempt to route 180 sats.
                        // Our algorithm should provide us with these 2 paths.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 180_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 180_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
 
                        let mut total_value_transferred_msat = 0;
@@ -5040,15 +5198,20 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 210_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 210_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 200 sats (exact amount we can route).
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 200_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 200_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
 
                        let mut total_amount_paid_msat = 0;
@@ -5148,7 +5311,10 @@ mod tests {
 
                // Get a route for 100 sats and check that we found the MPP route no problem and didn't
                // overpay at all.
-               let mut route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100_000);
+               let mut route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                route.paths.sort_by_key(|path| path.hops[0].short_channel_id);
                // Paths are manually ordered by SCID, so:
@@ -5266,16 +5432,22 @@ mod tests {
 
                {
                        // Attempt to route more than available results in a failure.
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 150_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
-                                       &our_id, &payment_params, &network_graph.read_only(), None, 150_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
-                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                                       &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                                       &scorer, &(), &random_seed_bytes) {
+                                               assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
 
                {
                        // Now, attempt to route 125 sats (just a bit below the capacity of 3 channels).
                        // Our algorithm should provide us with these 3 paths.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 125_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 125_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -5288,7 +5460,10 @@ mod tests {
 
                {
                        // Attempt to route without the last small cheap channel
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 90_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
@@ -5427,7 +5602,10 @@ mod tests {
 
                {
                        // Now ensure the route flows simply over nodes 1 and 4 to 6.
-                       let route = get_route(&our_id, &payment_params, &network.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 10_000);
+                       let route = get_route(&our_id, &route_params, &network.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 3);
 
@@ -5498,7 +5676,10 @@ mod tests {
                {
                        // Now, attempt to route 90 sats, which is exactly 90 sats at the last hop, plus the
                        // 200% fee charged on channel 13 in the 1-to-2 direction.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 90_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 2);
 
@@ -5564,7 +5745,10 @@ mod tests {
                        // Now, attempt to route 90 sats, hitting the htlc_minimum on channel 4, but
                        // overshooting the htlc_maximum on channel 2. Thus, we should pick the (absurdly
                        // expensive) channels 12-13 path.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 90_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 2);
 
@@ -5603,10 +5787,12 @@ mod tests {
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
                {
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 100_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 200_000),
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 10_000),
-                       ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 1);
 
@@ -5615,10 +5801,12 @@ mod tests {
                        assert_eq!(route.paths[0].hops[0].fee_msat, 100_000);
                }
                {
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 100_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 50_000),
-                       ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
                        assert_eq!(route.paths[0].hops.len(), 1);
                        assert_eq!(route.paths[1].hops.len(), 1);
@@ -5641,7 +5829,9 @@ mod tests {
                        // If we have several options above the 3xpayment value threshold, we should pick the
                        // smallest of them, avoiding further fragmenting our available outbound balance to
                        // this node.
-                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params, 100_000);
+                       let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(5), nodes[0], channelmanager::provided_init_features(&config), 50_000),
@@ -5650,7 +5840,7 @@ mod tests {
                                &get_channel_details(Some(8), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(9), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(4), nodes[0], channelmanager::provided_init_features(&config), 1_000_000),
-                       ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+                       ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 1);
 
@@ -5670,10 +5860,10 @@ mod tests {
                let scorer = ln_test_utils::TestScorer::new();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph.read_only(), None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 100);
@@ -5683,10 +5873,10 @@ mod tests {
                // Applying a 100 msat penalty to each hop results in taking channels 7 and 10 to nodes[6]
                // from nodes[2] rather than channel 6, 11, and 8, even though the longer path is cheaper.
                let scorer = FixedPenaltyScorer::with_penalty(100);
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph.read_only(), None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 300);
@@ -5736,10 +5926,10 @@ mod tests {
                let scorer = ln_test_utils::TestScorer::new();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 100);
@@ -5748,10 +5938,8 @@ mod tests {
 
                // A different path to nodes[6] exists if channel 6 cannot be routed over.
                let scorer = BadChannelScorer { short_channel_id: 6 };
-               let route = get_route(
-                       &our_id, &payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ).unwrap();
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
 
                assert_eq!(route.get_total_fees(), 300);
@@ -5760,14 +5948,12 @@ mod tests {
 
                // A path to nodes[6] does not exist if nodes[2] cannot be routed through.
                let scorer = BadNodeScorer { node_id: NodeId::from_pubkey(&nodes[2]) };
-               match get_route(
-                       &our_id, &payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes
-               ) {
-                       Err(LightningError { err, .. } ) => {
-                               assert_eq!(err, "Failed to find a path to the given destination");
-                       },
-                       Ok(_) => panic!("Expected error"),
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes) {
+                               Err(LightningError { err, .. } ) => {
+                                       assert_eq!(err, "Failed to find a path to the given destination");
+                               },
+                               Ok(_) => panic!("Expected error"),
                }
        }
 
@@ -5791,7 +5977,7 @@ mod tests {
                                        short_channel_id: 0, fee_msat: 225, cltv_expiry_delta: 0
                                },
                        ], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
 
                assert_eq!(route.get_total_fees(), 250);
@@ -5824,7 +6010,7 @@ mod tests {
                                        short_channel_id: 0, fee_msat: 150, cltv_expiry_delta: 0
                                },
                        ], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
 
                assert_eq!(route.get_total_fees(), 200);
@@ -5836,7 +6022,7 @@ mod tests {
                // In an earlier version of `Route::get_total_fees` and `Route::get_total_amount`, they
                // would both panic if the route was completely empty. We test to ensure they return 0
                // here, even though it's somewhat nonsensical as a route.
-               let route = Route { paths: Vec::new(), payment_params: None };
+               let route = Route { paths: Vec::new(), route_params: None };
 
                assert_eq!(route.get_total_fees(), 0);
                assert_eq!(route.get_total_amount(), 0);
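These hunks also rename the field carried on Route from payment_params to route_params, so a finished route remembers the full RouteParameters (amount included) it was built for; the None above suggests the field is optional. A sketch of constructing a Route by hand under that assumption, mirroring the totals behavior tested here:

    // With no paths there is nothing to sum, so both totals are zero rather than panicking.
    let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
    let route = Route { paths: Vec::new(), route_params: Some(route_params) };
    assert_eq!(route.get_total_fees(), 0);
    assert_eq!(route.get_total_amount(), 0);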
@@ -5856,7 +6042,10 @@ mod tests {
                        .with_max_total_cltv_expiry_delta(feasible_max_total_cltv_delta);
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       feasible_payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert_ne!(path.len(), 0);
 
@@ -5864,7 +6053,10 @@ mod tests {
                let fail_max_total_cltv_delta = 23;
                let fail_payment_params = PaymentParameters::from_node_id(nodes[6], 0).with_route_hints(last_hops(&nodes)).unwrap()
                        .with_max_total_cltv_expiry_delta(fail_max_total_cltv_delta);
-               match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       fail_payment_params, 100);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
@@ -5889,9 +6081,16 @@ mod tests {
 
                // We should be able to find a route initially, and then after we fail a few random
                // channels eventually we won't be able to any longer.
-               assert!(get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).is_ok());
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               assert!(get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).is_ok());
                loop {
-                       if let Ok(route) = get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               payment_params.clone(), 100);
+                       if let Ok(route) = get_route(&our_id, &route_params, &network_graph, None,
+                               Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
+                       {
                                for chan in route.paths[0].hops.iter() {
                                        assert!(!payment_params.previously_failed_channels.contains(&chan.short_channel_id));
                                }
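This loop re-routes until no path can be found, checking each returned path against payment_params.previously_failed_channels so the router never reuses a channel the payer has already blacklisted. A sketch of feeding that field during a retry, assuming it is a plain Vec of SCIDs as the contains() call suggests (failed_scid is a hypothetical value; payment_params is assumed mutable):

    // Hypothetical retry step: record the channel that just failed, then route again.
    payment_params.previously_failed_channels.push(failed_scid);
    let route_params = RouteParameters::from_payment_params_and_value(payment_params.clone(), 100);
    let retry = get_route(&our_id, &route_params, &network_graph, None,
        Arc::clone(&logger), &scorer, &(), &random_seed_bytes);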
@@ -5914,15 +6113,19 @@ mod tests {
 
                // First check we can actually create a long route on this graph.
                let feasible_payment_params = PaymentParameters::from_node_id(nodes[18], 0);
-               let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       feasible_payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert!(path.len() == MAX_PATH_LENGTH_ESTIMATE.into());
 
                // But we can't create a path surpassing the MAX_PATH_LENGTH_ESTIMATE limit.
                let fail_payment_params = PaymentParameters::from_node_id(nodes[19], 0);
-               match get_route(&our_id, &fail_payment_params, &network_graph, None, 100,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       fail_payment_params, 100);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
@@ -5941,7 +6144,10 @@ mod tests {
                let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops(&nodes)).unwrap();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 1);
 
                let cltv_expiry_deltas_before = route.paths[0].hops.iter().map(|h| h.cltv_expiry_delta).collect::<Vec<u32>>();
@@ -5975,8 +6181,10 @@ mod tests {
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[4u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
-               let mut route = get_route(&our_id, &payment_params, &network_graph, None, 100,
-                                                                 Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), 100);
+               let mut route = get_route(&our_id, &route_params, &network_graph, None,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
                add_random_cltv_offset(&mut route, &payment_params, &network_graph, &random_seed_bytes);
 
                let mut path_plausibility = vec![];
@@ -6040,8 +6248,9 @@ mod tests {
 
                let payment_params = PaymentParameters::from_node_id(nodes[3], 0);
                let hops = [nodes[1], nodes[2], nodes[4], nodes[3]];
-               let route = build_route_from_hops_internal(&our_id, &hops, &payment_params,
-                        &network_graph, 100, Arc::clone(&logger), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
+               let route = build_route_from_hops_internal(&our_id, &hops, &route_params, &network_graph,
+                       Arc::clone(&logger), &random_seed_bytes).unwrap();
                let route_hop_pubkeys = route.paths[0].hops.iter().map(|hop| hop.pubkey).collect::<Vec<_>>();
                assert_eq!(hops.len(), route.paths[0].hops.len());
                for (idx, hop_pubkey) in hops.iter().enumerate() {
@@ -6088,7 +6297,10 @@ mod tests {
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
                // 100,000 sats is less than the available liquidity on each channel, set above.
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000_000, Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100_000_000);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!((route.paths[0].hops[1].short_channel_id == 4 && route.paths[1].hops[1].short_channel_id == 13) ||
                        (route.paths[1].hops[1].short_channel_id == 4 && route.paths[0].hops[1].short_channel_id == 13));
@@ -6189,17 +6401,22 @@ mod tests {
 
                // Then check we can get a normal route
                let payment_params = PaymentParameters::from_node_id(nodes[10], 42);
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 100);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_ok());
 
                // Then check that we can't get a route if we ban an intermediate node.
                scorer_params.add_banned(&NodeId::from_pubkey(&nodes[3]));
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_err());
 
                // Finally make sure we can route again, when we remove the ban.
                scorer_params.remove_banned(&NodeId::from_pubkey(&nodes[3]));
-               let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params,&random_seed_bytes);
+               let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
+                       Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
                assert!(route.is_ok());
        }
 
@@ -6234,8 +6451,10 @@ mod tests {
 
                // Make sure we'll error if our route hints don't have enough liquidity according to their
                // htlc_maximum_msat.
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, max_htlc_msat + 1);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
-                       &payment_params, &netgraph, None, max_htlc_msat + 1, Arc::clone(&logger), &scorer, &(),
+                       &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes)
                {
                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
@@ -6247,8 +6466,10 @@ mod tests {
                let payment_params = PaymentParameters::from_node_id(dest_node_id, 42)
                        .with_route_hints(vec![route_hint_1, route_hint_2]).unwrap()
                        .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
-               let route = get_route(&our_id, &payment_params, &netgraph, None, max_htlc_msat + 1,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, max_htlc_msat + 1);
+               let route = get_route(&our_id, &route_params, &netgraph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
                assert!(route.paths[1].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6301,8 +6522,10 @@ mod tests {
                        .with_route_hints(vec![route_hint_1, route_hint_2]).unwrap()
                        .with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
 
-               let route = get_route(&our_node_id, &payment_params, &network_graph.read_only(),
-                       Some(&first_hop.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
+                       Some(&first_hop.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6315,8 +6538,8 @@ mod tests {
                        get_channel_details(Some(42), intermed_node_id, InitFeatures::from_le_bytes(vec![0b11]), amt_msat - 10),
                        get_channel_details(Some(43), intermed_node_id, InitFeatures::from_le_bytes(vec![0b11]), amt_msat - 10),
                ];
-               let route = get_route(&our_node_id, &payment_params, &network_graph.read_only(),
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6345,8 +6568,10 @@ mod tests {
                        (blinded_payinfo.clone(), blinded_path.clone()),
                        (blinded_payinfo.clone(), blinded_path.clone())])
                        .with_bolt12_features(bolt12_features).unwrap();
-               let route = get_route(&our_node_id, &payment_params, &network_graph.read_only(),
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
@@ -6396,7 +6621,7 @@ mod tests {
                                fee_msat: 100,
                                cltv_expiry_delta: 0,
                        }], blinded_tail: None }],
-                       payment_params: None,
+                       route_params: None,
                };
                let encoded_route = route.encode();
                let decoded_route: Route = Readable::read(&mut Cursor::new(&encoded_route[..])).unwrap();
@@ -6490,7 +6715,7 @@ mod tests {
                                excess_final_cltv_expiry_delta: 0,
                                final_value_msat: 200,
                        }),
-               }], payment_params: None};
+               }], route_params: None};
 
                let payment_params = PaymentParameters::from_node_id(ln_test_utils::pubkey(47), 18);
                let (_, network_graph, _, _, _) = build_line_graph();
@@ -6535,9 +6760,10 @@ mod tests {
                        features: BlindedHopFeatures::empty(),
                };
 
-               let final_amt_msat = 1001;
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo.clone(), blinded_path.clone())]);
-               let route = get_route(&our_id, &payment_params, &network_graph, None, final_amt_msat , Arc::clone(&logger),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, 1001);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
                        &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 1);
                assert_eq!(route.paths[0].hops.len(), 2);
@@ -6591,7 +6817,8 @@ mod tests {
                let payment_params = PaymentParameters::blinded(vec![
                        (blinded_payinfo.clone(), invalid_blinded_path.clone()),
                        (blinded_payinfo.clone(), invalid_blinded_path_2)]);
-               match get_route(&our_id, &payment_params, &network_graph, None, 1001, Arc::clone(&logger),
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
                        &scorer, &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
@@ -6602,8 +6829,9 @@ mod tests {
 
                invalid_blinded_path.introduction_node_id = our_id;
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo.clone(), invalid_blinded_path.clone())]);
-               match get_route(&our_id, &payment_params, &network_graph, None, 1001, Arc::clone(&logger),
-                       &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "Cannot generate a route to blinded paths if we are the introduction node to all of them");
@@ -6614,8 +6842,9 @@ mod tests {
                invalid_blinded_path.introduction_node_id = ln_test_utils::pubkey(46);
                invalid_blinded_path.blinded_hops.clear();
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo, invalid_blinded_path)]);
-               match get_route(&our_id, &payment_params, &network_graph, None, 1001, Arc::clone(&logger),
-                       &scorer, &(), &random_seed_bytes)
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
+               match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
+                       &(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "0-hop blinded path provided");
@@ -6667,8 +6896,9 @@ mod tests {
                let payment_params = PaymentParameters::blinded(blinded_hints.clone())
                        .with_bolt12_features(bolt12_features.clone()).unwrap();
 
-               let route = get_route(&our_id, &payment_params, &network_graph, None,
-                       100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100_000);
+               let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
+                       &scorer, &(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                let mut total_amount_paid_msat = 0;
                for path in route.paths.into_iter() {
@@ -6754,17 +6984,21 @@ mod tests {
                let payment_params = PaymentParameters::blinded(blinded_hints.clone());
 
                let netgraph = network_graph.read_only();
-               if let Err(LightningError { err, .. }) = get_route(&nodes[0], &payment_params, &netgraph,
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params.clone(), amt_msat);
+               if let Err(LightningError { err, .. }) = get_route(&nodes[0], &route_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
                        &random_seed_bytes) {
-                       assert_eq!(err, "Failed to find a path to the given destination");
+                               assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!("Expected error") }
 
                // Sending an exact amount accounting for the blinded path fee works.
                let amt_minus_blinded_path_fee = amt_msat - blinded_payinfo.fee_base_msat as u64;
-               let route = get_route(&nodes[0], &payment_params, &netgraph,
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_minus_blinded_path_fee,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_minus_blinded_path_fee);
+               let route = get_route(&nodes[0], &route_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_minus_blinded_path_fee);
        }
@@ -6829,9 +7063,11 @@ mod tests {
                        .with_bolt12_features(bolt12_features.clone()).unwrap();
 
                let netgraph = network_graph.read_only();
-               let route = get_route(&nodes[0], &payment_params, &netgraph,
-                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat,
-                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       payment_params, amt_msat);
+               let route = get_route(&nodes[0], &route_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes).unwrap();
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_msat);
        }
@@ -6963,10 +7199,12 @@ pub(crate) mod bench_utils {
                                let params = PaymentParameters::from_node_id(dst, 42)
                                        .with_bolt11_features(features.clone()).unwrap();
                                let first_hop = first_hop(src);
-                               let amt = starting_amount + seed % 1_000_000;
+                               let amt_msat = starting_amount + seed % 1_000_000;
+                               let route_params = RouteParameters::from_payment_params_and_value(
+                                       params.clone(), amt_msat);
                                let path_exists =
-                                       get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]),
-                                               amt, &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok();
+                                       get_route(&payer, &route_params, &graph.read_only(), Some(&[&first_hop]),
+                                               &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok();
                                if path_exists {
                                        // ...and seed the scorer with success and failure data...
                                        seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
@@ -6978,10 +7216,11 @@ pub(crate) mod bench_utils {
                                                let mpp_features = channelmanager::provided_invoice_features(&UserConfig::default());
                                                let params = PaymentParameters::from_node_id(dst, 42)
                                                        .with_bolt11_features(mpp_features).unwrap();
-
-                                               let route_res = get_route(&payer, &params, &graph.read_only(),
-                                                       Some(&[&first_hop]), score_amt, &TestLogger::new(), scorer,
-                                                       score_params, &random_seed_bytes);
+                                               let route_params = RouteParameters::from_payment_params_and_value(
+                                                       params.clone(), score_amt);
+                                               let route_res = get_route(&payer, &route_params, &graph.read_only(),
+                                                       Some(&[&first_hop]), &TestLogger::new(), scorer, score_params,
+                                                       &random_seed_bytes);
                                                if let Ok(route) = route_res {
                                                        for path in route.paths {
                                                                if seed & 0x80 == 0 {
@@ -6998,7 +7237,7 @@ pub(crate) mod bench_utils {
                                                score_amt /= 100;
                                        }
 
-                                       route_endpoints.push((first_hop, params, amt));
+                                       route_endpoints.push((first_hop, params, amt_msat));
                                        break;
                                }
                        }
@@ -7007,8 +7246,10 @@ pub(crate) mod bench_utils {
                // Because we've changed channel scores, it's possible we'll take different routes to the
                // selected destinations, possibly causing us to fail because, eg, the newly-selected path
                // requires a too-high CLTV delta.
-               route_endpoints.retain(|(first_hop, params, amt)| {
-                       get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+               route_endpoints.retain(|(first_hop, params, amt_msat)| {
+                       let route_params = RouteParameters::from_payment_params_and_value(
+                               params.clone(), *amt_msat);
+                       get_route(&payer, &route_params, &graph.read_only(), Some(&[first_hop]),
                                &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok()
                });
                route_endpoints.truncate(route_count);
@@ -7099,7 +7340,8 @@ pub mod benches {
                let mut idx = 0;
                bench.bench_function(bench_name, |b| b.iter(|| {
                        let (first_hop, params, amt) = &route_endpoints[idx % route_endpoints.len()];
-                       assert!(get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+                       let route_params = RouteParameters::from_payment_params_and_value(params.clone(), *amt);
+                       assert!(get_route(&payer, &route_params, &graph.read_only(), Some(&[first_hop]),
                                &DummyLogger{}, &scorer, score_params, &random_seed_bytes).is_ok());
                        idx += 1;
                }));
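
The hunks above all apply one mechanical change: `get_route` (and `build_route_from_hops_internal`) no longer takes the payment amount as a separate argument, so each call site first folds the `PaymentParameters` and the amount into a `RouteParameters`. A minimal sketch of the new calling convention, assuming the bindings these tests already set up (`our_id`, `network_graph`, `logger`, `scorer`, `random_seed_bytes`) are in scope; `dest_node_id` is a stand-in for whichever node a given test targets:

    // Bundle the destination/constraints and the amount up front...
    let payment_params = PaymentParameters::from_node_id(dest_node_id, 42);
    let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100_000);
    // ...then pass the bundle; note there is no longer a bare amount argument.
    let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
        Arc::clone(&logger), &scorer, &(), &random_seed_bytes);
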
diff --git a/lightning/src/util/base32.rs b/lightning/src/util/base32.rs
new file mode 100644 (file)
index 0000000..2e66d59
--- /dev/null
@@ -0,0 +1,273 @@
+// This is a modification of base32 encoding to support the zbase32 alphabet.
+// The original piece of software can be found at https://crates.io/crates/base32 (v0.4.0)
+// The original portions of this software are Copyright (c) 2015 The base32 Developers
+
+// This file is licensed under either of
+// Apache License, Version 2.0, (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
+// MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT) at your option.
+
+
+use crate::prelude::*;
+
+/// RFC4648 encoding table
+const RFC4648_ALPHABET: &'static [u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";
+
+/// Zbase encoding alphabet
+const ZBASE_ALPHABET: &'static [u8] = b"ybndrfg8ejkmcpqxot1uwisza345h769";
+
+/// RFC4648 decoding table
+const RFC4648_INV_ALPHABET: [i8; 43] = [
+       -1, -1, 26, 27, 28, 29, 30, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8,
+       9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+];
+
+/// Zbase decoding table
+const ZBASE_INV_ALPHABET: [i8; 43] = [
+       -1, 18, -1, 25, 26, 27, 30, 29, 7, 31, -1, -1, -1, -1, -1, -1, -1, 24, 1, 12, 3, 8, 5, 6, 28,
+       21, 9, 10, -1, 11, 2, 16, 13, 14, 4, 22, 17, 19, -1, 20, 15, 0, 23,
+];
+
+/// Alphabet used for encoding and decoding.
+#[derive(Copy, Clone)]
+pub enum Alphabet {
+       /// RFC4648 encoding.
+       RFC4648 {
+               /// Whether to use padding.
+               padding: bool
+       },
+       /// Zbase32 encoding.
+       ZBase32
+}
+
+impl Alphabet {
+       /// Encode bytes into a base32 string.
+       pub fn encode(&self, data: &[u8]) -> String {
+               // output_length is calculated as follows:
+               // * 8 converts the byte count into a bit count,
+               // + 4 then / 5 rounds the bit count up to the number of 5-bit characters
+               // actually encoded by the data.
+               let output_length = (data.len() * 8 + 4) / 5;
+               let mut ret = match self {
+                       Self::RFC4648 { padding } => {
+                               let mut ret = Self::encode_data(data, RFC4648_ALPHABET);
+                               if *padding {
+                                       let len = ret.len();
+                                       for i in output_length..len {
+                                               ret[i] = b'=';
+                                       }
+
+                                       return String::from_utf8(ret).expect("Invalid UTF-8");
+                               }
+                               ret
+                       },
+                       Self::ZBase32 => {
+                               Self::encode_data(data, ZBASE_ALPHABET)
+                       },
+               };
+               ret.truncate(output_length);
+
+               #[cfg(fuzzing)]
+               assert_eq!(ret.capacity(), (data.len() + 4) / 5 * 8);
+
+               String::from_utf8(ret).expect("Invalid UTF-8")
+       }
+
+       /// Decode a base32 string into a byte vector.
+       pub fn decode(&self, data: &str) -> Result<Vec<u8>, ()> {
+               let data = data.as_bytes();
+               let (data, alphabet) = match self {
+                       Self::RFC4648 { padding } => {
+                               let mut unpadded_data_length = data.len();
+                               if *padding {
+                                       if data.len() % 8 != 0 { return Err(()); }
+                                       data.iter().rev().take(6).for_each(|&c| {
+                                               if c == b'=' {
+                                                       unpadded_data_length -= 1;
+                                               }
+                                       });
+                               }
+                               (&data[..unpadded_data_length], RFC4648_INV_ALPHABET)
+                       },
+                       Self::ZBase32 => {
+                               (data, ZBASE_INV_ALPHABET)
+                       }
+               };
+               // If the string has more characters than are required to encode the number of bytes
+               // decodable, treat the string as invalid.
+               match data.len() % 8 { 1|3|6 => return Err(()), _ => {} }
+               Ok(Self::decode_data(data, alphabet)?)
+       }
+
+       /// Encode a byte slice into a base32 string.
+       fn encode_data(data: &[u8], alphabet: &'static [u8]) -> Vec<u8> {
+               // cap is calculated as follows:
+               // + 4 then / 5 rounds the data length up to the number of 5-byte chunks,
+               // * 8 multiplies that by the number of output characters per chunk (8).
+               let cap = (data.len() + 4) / 5 * 8;
+               let mut ret = Vec::with_capacity(cap);
+               for chunk in data.chunks(5) {
+                       let mut buf = [0u8; 5];
+                       for (i, &b) in chunk.iter().enumerate() {
+                               buf[i] = b;
+                       }
+                       ret.push(alphabet[((buf[0] & 0xF8) >> 3) as usize]);
+                       ret.push(alphabet[(((buf[0] & 0x07) << 2) | ((buf[1] & 0xC0) >> 6)) as usize]);
+                       ret.push(alphabet[((buf[1] & 0x3E) >> 1) as usize]);
+                       ret.push(alphabet[(((buf[1] & 0x01) << 4) | ((buf[2] & 0xF0) >> 4)) as usize]);
+                       ret.push(alphabet[(((buf[2] & 0x0F) << 1) | (buf[3] >> 7)) as usize]);
+                       ret.push(alphabet[((buf[3] & 0x7C) >> 2) as usize]);
+                       ret.push(alphabet[(((buf[3] & 0x03) << 3) | ((buf[4] & 0xE0) >> 5)) as usize]);
+                       ret.push(alphabet[(buf[4] & 0x1F) as usize]);
+               }
+               #[cfg(fuzzing)]
+               assert_eq!(ret.capacity(), cap);
+
+               ret
+       }
+
+       fn decode_data(data: &[u8], alphabet: [i8; 43]) -> Result<Vec<u8>, ()> {
+               // cap is calculated as follows:
+               // cap is calculated as follows:
+               // + 7 then / 8 rounds the data length up to the number of 8-character chunks,
+               // * 5 multiplies that by the number of output bytes per chunk (5).
+               let cap = (data.len() + 7) / 8 * 5;
+               let mut ret = Vec::with_capacity(cap);
+               for chunk in data.chunks(8) {
+                       let mut buf = [0u8; 8];
+                       for (i, &c) in chunk.iter().enumerate() {
+                               match alphabet.get(c.to_ascii_uppercase().wrapping_sub(b'0') as usize) {
+                                       Some(&-1) | None => return Err(()),
+                                       Some(&value) => buf[i] = value as u8,
+                               };
+                       }
+                       ret.push((buf[0] << 3) | (buf[1] >> 2));
+                       ret.push((buf[1] << 6) | (buf[2] << 1) | (buf[3] >> 4));
+                       ret.push((buf[3] << 4) | (buf[4] >> 1));
+                       ret.push((buf[4] << 7) | (buf[5] << 2) | (buf[6] >> 3));
+                       ret.push((buf[6] << 5) | buf[7]);
+               }
+               let output_length = data.len() * 5 / 8;
+               for c in ret.drain(output_length..) {
+                       if c != 0 {
+                               // If the original string had any bits set at positions outside of the encoded data,
+                               // treat the string as invalid.
+                               return Err(());
+                       }
+               }
+
+               // Check that our capacity calculation doesn't under-shoot in fuzzing
+               #[cfg(fuzzing)]
+               assert_eq!(ret.capacity(), cap);
+               Ok(ret)
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use super::*;
+
+       const ZBASE32_TEST_DATA: &[(&str, &[u8])] = &[
+               ("", &[]),
+               ("yy", &[0x00]),
+               ("oy", &[0x80]),
+               ("tqrey", &[0x8b, 0x88, 0x80]),
+               ("6n9hq", &[0xf0, 0xbf, 0xc7]),
+               ("4t7ye", &[0xd4, 0x7a, 0x04]),
+               ("6im5sdy", &[0xf5, 0x57, 0xbb, 0x0c]),
+               ("ybndrfg8ejkmcpqxot1uwisza345h769", &[0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6,
+               0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, 0xd7, 0xc6,
+               0x75, 0xbe, 0x77, 0xdf])
+       ];
+
+       #[test]
+       fn test_zbase32_encode() {
+               for &(zbase32, data) in ZBASE32_TEST_DATA {
+                       assert_eq!(Alphabet::ZBase32.encode(data), zbase32);
+               }
+       }
+
+       #[test]
+       fn test_zbase32_decode() {
+               for &(zbase32, data) in ZBASE32_TEST_DATA {
+                       assert_eq!(Alphabet::ZBase32.decode(zbase32).unwrap(), data);
+               }
+       }
+
+       #[test]
+       fn test_decode_wrong() {
+               const WRONG_DATA: &[&str] = &["00", "l1", "?", "="];
+               for &data in WRONG_DATA {
+                       match Alphabet::ZBase32.decode(data) {
+                               Ok(_) => assert!(false, "Data shouldn't be decodable"),
+                               Err(_) => assert!(true),
+                       }
+               }
+       }
+
+       const RFC4648_NON_PADDED_TEST_VECTORS: &[(&[u8], &[u8])] = &[
+               (&[0xF8, 0x3E, 0x7F, 0x83, 0xE7], b"7A7H7A7H"),
+               (&[0x77, 0xC1, 0xF7, 0x7C, 0x1F], b"O7A7O7A7"),
+               (&[0xF8, 0x3E, 0x7F, 0x83, 0xE7], b"7A7H7A7H"),
+               (&[0x77, 0xC1, 0xF7, 0x7C, 0x1F], b"O7A7O7A7"),
+       ];
+
+       const RFC4648_TEST_VECTORS: &[(&[u8], &str)] = &[
+               (b"", ""),
+               (b"f", "MY======"),
+               (b"fo", "MZXQ===="),
+               (b"foo", "MZXW6==="),
+               (b"foob", "MZXW6YQ="),
+               (b"fooba", "MZXW6YTB"),
+               (b"foobar", "MZXW6YTBOI======"),
+               (&[0xF8, 0x3E, 0x7F, 0x83], "7A7H7AY="),
+       ];
+
+       #[test]
+       fn test_rfc4648_encode() {
+               for (input, encoded) in RFC4648_TEST_VECTORS {
+                       assert_eq!(&Alphabet::RFC4648 { padding: true }.encode(input), encoded);
+               }
+
+               for (input, encoded) in RFC4648_NON_PADDED_TEST_VECTORS {
+                       assert_eq!(&Alphabet::RFC4648 { padding: false }.encode(input).as_bytes(), encoded);
+               }
+       }
+
+       #[test]
+       fn test_rfc4648_decode() {
+               for (input, encoded) in RFC4648_TEST_VECTORS {
+                       let res = &Alphabet::RFC4648 { padding: true }.decode(encoded).unwrap();
+                       assert_eq!(&res[..], &input[..]);
+               }
+
+               for (input, encoded) in RFC4648_NON_PADDED_TEST_VECTORS {
+                       let res = &Alphabet::RFC4648 { padding: false }.decode(std::str::from_utf8(encoded).unwrap()).unwrap();
+                       assert_eq!(&res[..], &input[..]);
+               }
+       }
+
+       #[test]
+       fn padding() {
+               let num_padding = [0, 6, 4, 3, 1];
+               for i in 1..6 {
+                       let encoded = Alphabet::RFC4648 { padding: true }.encode(
+                               (0..(i as u8)).collect::<Vec<u8>>().as_ref()
+                       );
+                       assert_eq!(encoded.len(), 8);
+                       for j in 0..(num_padding[i % 5]) {
+                               assert_eq!(encoded.as_bytes()[encoded.len() - j - 1], b'=');
+                       }
+                       for j in 0..(8 - num_padding[i % 5]) {
+                               assert!(encoded.as_bytes()[j] != b'=');
+                       }
+               }
+       }
+
+       #[test]
+       fn test_decode_rfc4648_errors() {
+               assert!(Alphabet::RFC4648 { padding: false }.decode("abc2def===").is_err()); // Invalid char because padding is disabled
+               assert!(Alphabet::RFC4648 { padding: true }.decode("abc2def===").is_err()); // Invalid length
+               assert!(Alphabet::RFC4648 { padding: true }.decode("MZX=6YTB").is_err()); // Invalid char
+       }
+}
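
A short usage sketch of the new `Alphabet` API, reusing the module's own test vectors above; the paths assume it runs inside this module's test harness (the module may well be crate-internal):

    use super::Alphabet;

    // zbase32 round-trip (the alphabet used for Lightning message signing).
    let encoded = Alphabet::ZBase32.encode(&[0xf0, 0xbf, 0xc7]);
    assert_eq!(encoded, "6n9hq");
    assert_eq!(Alphabet::ZBase32.decode(&encoded).unwrap(), vec![0xf0, 0xbf, 0xc7]);

    // RFC4648 with padding, per the vectors above; a misplaced '=' is rejected.
    assert_eq!(Alphabet::RFC4648 { padding: true }.encode(b"foob"), "MZXW6YQ=");
    assert!(Alphabet::RFC4648 { padding: true }.decode("MZX=6YTB").is_err());
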
index c729e6847470e024858911793b9273592b0f7fb6..f46b344f2ce144c235e40e041743e707570e43a5 100644 (file)
@@ -159,6 +159,30 @@ mod real_chacha {
                        chacha_bytes
                }
 
+               /// Encrypts `src` into `dest` using a single block from a ChaCha stream. Passing `dest` as
+               /// `src` in a second call will decrypt it.
+               pub fn encrypt_single_block(
+                       key: &[u8; 32], nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
+               ) {
+                       debug_assert_eq!(dest.len(), src.len());
+                       debug_assert!(dest.len() <= 32);
+
+                       let block = ChaCha20::get_single_block(key, nonce);
+                       for i in 0..dest.len() {
+                               dest[i] = block[i] ^ src[i];
+                       }
+               }
+
+               /// Same as `encrypt_single_block`, but operates on a fixed-size 32-byte buffer in place.
+               pub fn encrypt_single_block_in_place(
+                       key: &[u8; 32], nonce: &[u8; 16], bytes: &mut [u8; 32]
+               ) {
+                       let block = ChaCha20::get_single_block(key, nonce);
+                       for i in 0..bytes.len() {
+                               bytes[i] = block[i] ^ bytes[i];
+                       }
+               }
+
                fn expand(key: &[u8], nonce: &[u8]) -> ChaChaState {
                        let constant = match key.len() {
                                16 => b"expand 16-byte k",
@@ -290,6 +314,17 @@ mod fuzzy_chacha {
                        [0; 32]
                }
 
+               pub fn encrypt_single_block(
+                       _key: &[u8; 32], _nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
+               ) {
+                       debug_assert_eq!(dest.len(), src.len());
+                       debug_assert!(dest.len() <= 32);
+               }
+
+               pub fn encrypt_single_block_in_place(
+                       _key: &[u8; 32], _nonce: &[u8; 16], _bytes: &mut [u8; 32]
+               ) {}
+
                pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
                        output.copy_from_slice(input);
                }
@@ -618,4 +653,49 @@ mod test {
 
                assert_eq!(ChaCha20::get_single_block(&key, &nonce_16bytes), block_bytes);
        }
+
+       #[test]
+       fn encrypt_single_block() {
+               let key = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+               ];
+               let nonce = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+                       0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+               ];
+               let bytes = [1; 32];
+
+               let mut encrypted_bytes = [0; 32];
+               ChaCha20::encrypt_single_block(&key, &nonce, &mut encrypted_bytes, &bytes);
+
+               let mut decrypted_bytes = [0; 32];
+               ChaCha20::encrypt_single_block(&key, &nonce, &mut decrypted_bytes, &encrypted_bytes);
+
+               assert_eq!(bytes, decrypted_bytes);
+       }
+
+       #[test]
+       fn encrypt_single_block_in_place() {
+               let key = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+               ];
+               let nonce = [
+                       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+                       0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+               ];
+               let unencrypted_bytes = [1; 32];
+               let mut bytes = unencrypted_bytes;
+
+               ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut bytes);
+               assert_ne!(bytes, unencrypted_bytes);
+
+               ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut bytes);
+               assert_eq!(bytes, unencrypted_bytes);
+       }
 }
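
Both new helpers are a plain XOR against the single keystream block returned by `get_single_block`, which is why encrypting twice with the same key and nonce round-trips, and why the in-place variant yields the same ciphertext as the two-buffer one. A small sketch along the lines of the tests above (crate-internal, like those tests):

    let key = [0x42u8; 32];
    let nonce = [0x07u8; 16];
    let plaintext = [1u8; 32];

    // Two-buffer form: dest = keystream block XOR src.
    let mut ciphertext = [0u8; 32];
    ChaCha20::encrypt_single_block(&key, &nonce, &mut ciphertext, &plaintext);

    // In-place form produces the same bytes for the same key/nonce/plaintext.
    let mut buf = plaintext;
    ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut buf);
    assert_eq!(buf, ciphertext);

    // Applying the same operation again recovers the plaintext (XOR is its own inverse).
    ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut buf);
    assert_eq!(buf, plaintext);
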
index 617f71e42c6854cb5d106e7a20d7543b14332e3b..cdd00d92af9c5f3cd0a2fe3a61c3447310a0bba2 100644 (file)
@@ -24,7 +24,7 @@ macro_rules! hkdf_extract_expand {
                let (k1, k2, _) = hkdf_extract_expand!($salt, $ikm);
                (k1, k2)
        }};
-       ($salt: expr, $ikm: expr, 4) => {{
+       ($salt: expr, $ikm: expr, 5) => {{
                let (k1, k2, prk) = hkdf_extract_expand!($salt, $ikm);
 
                let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
@@ -35,7 +35,14 @@ macro_rules! hkdf_extract_expand {
                let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
                hmac.input(&k3);
                hmac.input(&[4; 1]);
-               (k1, k2, k3, Hmac::from_engine(hmac).into_inner())
+               let k4 = Hmac::from_engine(hmac).into_inner();
+
+               let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+               hmac.input(&k4);
+               hmac.input(&[5; 1]);
+               let k5 = Hmac::from_engine(hmac).into_inner();
+
+               (k1, k2, k3, k4, k5)
        }}
 }
 
@@ -43,8 +50,8 @@ pub fn hkdf_extract_expand_twice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32]
        hkdf_extract_expand!(salt, ikm, 2)
 }
 
-pub fn hkdf_extract_expand_4x(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32], [u8; 32]) {
-       hkdf_extract_expand!(salt, ikm, 4)
+pub fn hkdf_extract_expand_5x(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32], [u8; 32], [u8; 32]) {
+       hkdf_extract_expand!(salt, ikm, 5)
 }
 
 #[inline]
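
The macro arm simply extends the HKDF-Expand chain by one step: under the PRK from the extract phase, each further key is `HMAC-SHA256(prk, previous_key || counter)`, with the counter byte here now running up to 5. A hedged sketch of calling the renamed helper (crate-internal, so callable only from within the crate):

    // Five 32-byte outputs instead of four; the fifth is HMAC-SHA256(prk, k4 || 0x05).
    let (_k1, _k2, _k3, k4, k5) = hkdf_extract_expand_5x(b"salt", b"input key material");
    assert_ne!(k4, k5);
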
diff --git a/lightning/src/util/enforcing_trait_impls.rs b/lightning/src/util/enforcing_trait_impls.rs
deleted file mode 100644 (file)
index df0f13b..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
-use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
-use crate::ln::{chan_utils, msgs, PaymentPreimage};
-use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
-
-use crate::prelude::*;
-use core::cmp;
-use crate::sync::{Mutex, Arc};
-#[cfg(test)] use crate::sync::MutexGuard;
-
-use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
-use bitcoin::util::sighash;
-
-use bitcoin::secp256k1;
-use bitcoin::secp256k1::{SecretKey, PublicKey};
-use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
-use crate::events::bump_transaction::HTLCDescriptor;
-use crate::util::ser::{Writeable, Writer};
-use crate::io::Error;
-use crate::ln::features::ChannelTypeFeatures;
-
-/// Initial value for revoked commitment downward counter
-pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
-
-/// An implementation of Sign that enforces some policy checks.  The current checks
-/// are an incomplete set.  They include:
-///
-/// - When signing, the holder transaction has not been revoked
-/// - When revoking, the holder transaction has not been signed
-/// - The holder commitment number is monotonic and without gaps
-/// - The revoked holder commitment number is monotonic and without gaps
-/// - There is at least one unrevoked holder transaction at all times
-/// - The counterparty commitment number is monotonic and without gaps
-/// - The pre-derived keys and pre-built transaction in CommitmentTransaction were correctly built
-///
-/// Eventually we will probably want to expose a variant of this which would essentially
-/// be what you'd want to run on a hardware wallet.
-///
-/// Note that counterparty signatures on the holder transaction are not checked, but it should
-/// be in a complete implementation.
-///
-/// Note that before we do so we should ensure its serialization format has backwards- and
-/// forwards-compatibility prefix/suffixes!
-#[derive(Clone)]
-pub struct EnforcingSigner {
-       pub inner: InMemorySigner,
-       /// Channel state used for policy enforcement
-       pub state: Arc<Mutex<EnforcementState>>,
-       pub disable_revocation_policy_check: bool,
-}
-
-impl PartialEq for EnforcingSigner {
-       fn eq(&self, o: &Self) -> bool {
-               Arc::ptr_eq(&self.state, &o.state)
-       }
-}
-
-impl EnforcingSigner {
-       /// Construct an EnforcingSigner
-       pub fn new(inner: InMemorySigner) -> Self {
-               let state = Arc::new(Mutex::new(EnforcementState::new()));
-               Self {
-                       inner,
-                       state,
-                       disable_revocation_policy_check: false
-               }
-       }
-
-       /// Construct an EnforcingSigner with externally managed storage
-       ///
-       /// Since there are multiple copies of this struct for each channel, some coordination is needed
-       /// so that all copies are aware of enforcement state.  A pointer to this state is provided
-       /// here, usually by an implementation of KeysInterface.
-       pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
-               Self {
-                       inner,
-                       state,
-                       disable_revocation_policy_check
-               }
-       }
-
-       pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features() }
-
-       #[cfg(test)]
-       pub fn get_enforcement_state(&self) -> MutexGuard<EnforcementState> {
-               self.state.lock().unwrap()
-       }
-}
-
-impl ChannelSigner for EnforcingSigner {
-       fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
-               self.inner.get_per_commitment_point(idx, secp_ctx)
-       }
-
-       fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
-               {
-                       let mut state = self.state.lock().unwrap();
-                       assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
-                       assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
-                       state.last_holder_revoked_commitment = idx;
-               }
-               self.inner.release_commitment_secret(idx)
-       }
-
-       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
-               let mut state = self.state.lock().unwrap();
-               let idx = holder_tx.commitment_number();
-               assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
-               state.last_holder_commitment = idx;
-               Ok(())
-       }
-
-       fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
-
-       fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
-
-       fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
-               self.inner.provide_channel_parameters(channel_parameters)
-       }
-}
-
-impl EcdsaChannelSigner for EnforcingSigner {
-       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
-
-               {
-                       let mut state = self.state.lock().unwrap();
-                       let actual_commitment_number = commitment_tx.commitment_number();
-                       let last_commitment_number = state.last_counterparty_commitment;
-                       // These commitment numbers are backwards counting.  We expect either the same as the previously encountered,
-                       // or the next one.
-                       assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
-                       // Ensure that the counterparty doesn't get more than two broadcastable commitments -
-                       // the last and the one we are trying to sign
-                       assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
-                       state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
-               }
-
-               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
-       }
-
-       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               let mut state = self.state.lock().unwrap();
-               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
-               state.last_counterparty_revoked_commitment = idx;
-               Ok(())
-       }
-
-       fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
-               let commitment_txid = trusted_tx.txid();
-               let holder_csv = self.inner.counterparty_selected_contest_delay();
-
-               let state = self.state.lock().unwrap();
-               let commitment_number = trusted_tx.commitment_number();
-               if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
-                       if !self.disable_revocation_policy_check {
-                               panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
-                                      state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
-                       }
-               }
-
-               for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
-                       assert!(this_htlc.transaction_output_index.is_some());
-                       let keys = trusted_tx.keys();
-                       let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.channel_type_features(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
-
-                       let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.channel_type_features(), &keys);
-
-                       let sighash_type = if self.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                               EcdsaSighashType::SinglePlusAnyoneCanPay
-                       } else {
-                               EcdsaSighashType::All
-                       };
-                       let sighash = hash_to_message!(
-                               &sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(
-                                       0, &htlc_redeemscript, this_htlc.amount_msat / 1000, sighash_type,
-                               ).unwrap()[..]
-                       );
-                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
-               }
-
-               Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
-       }
-
-       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-       fn unsafe_sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               Ok(self.inner.unsafe_sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
-       }
-
-       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
-       }
-
-       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
-       }
-
-       fn sign_holder_htlc_transaction(
-               &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
-               secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()> {
-               assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
-               assert_eq!(htlc_tx.output[input], htlc_descriptor.tx_output(secp_ctx));
-               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
-       }
-
-       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
-       }
-
-       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               closing_tx.verify(self.inner.funding_outpoint().into_bitcoin_outpoint())
-                       .expect("derived different closing transaction");
-               Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
-       }
-
-       fn sign_holder_anchor_input(
-               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
-       ) -> Result<Signature, ()> {
-               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
-               // As long as our minimum dust limit is enforced and is greater than our anchor output
-               // value, an anchor output can only have an index within [0, 1].
-               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
-               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
-       }
-
-       fn sign_channel_announcement_with_funding_key(
-               &self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()> {
-               self.inner.sign_channel_announcement_with_funding_key(msg, secp_ctx)
-       }
-}
-
-impl WriteableEcdsaChannelSigner for EnforcingSigner {}
-
-impl Writeable for EnforcingSigner {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
-               // EnforcingSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
-               // ([`EnforcementState`]). `inner` is serialized here and deserialized by
-               // [`SignerProvider::read_chan_signer`]. `state` is managed by [`SignerProvider`]
-               // and will be serialized as needed by the implementation of that trait.
-               self.inner.write(writer)?;
-               Ok(())
-       }
-}
-
-impl EnforcingSigner {
-       fn verify_counterparty_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
-               commitment_tx.verify(&self.inner.get_channel_parameters().as_counterparty_broadcastable(),
-                                    self.inner.counterparty_pubkeys(), self.inner.pubkeys(), secp_ctx)
-                       .expect("derived different per-tx keys or built transaction")
-       }
-
-       fn verify_holder_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
-               commitment_tx.verify(&self.inner.get_channel_parameters().as_holder_broadcastable(),
-                                    self.inner.pubkeys(), self.inner.counterparty_pubkeys(), secp_ctx)
-                       .expect("derived different per-tx keys or built transaction")
-       }
-}
-
-/// The state used by [`EnforcingSigner`] in order to enforce policy checks
-///
-/// This structure is maintained by KeysInterface since we may have multiple copies of
-/// the signer and they must coordinate their state.
-#[derive(Clone)]
-pub struct EnforcementState {
-       /// The last counterparty commitment number we signed, backwards counting
-       pub last_counterparty_commitment: u64,
-       /// The last counterparty commitment they revoked, backwards counting
-       pub last_counterparty_revoked_commitment: u64,
-       /// The last holder commitment number we revoked, backwards counting
-       pub last_holder_revoked_commitment: u64,
-       /// The last validated holder commitment number, backwards counting
-       pub last_holder_commitment: u64,
-}
-
-impl EnforcementState {
-       /// Enforcement state for a new channel
-       pub fn new() -> Self {
-               EnforcementState {
-                       last_counterparty_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-               }
-       }
-}
index aa1fdfcee156216f6edcc79b3563a6b2f3f4e5e7..477cbcbdd9aa93493de6703e0d4d57cd81c135d2 100644 (file)
@@ -21,7 +21,7 @@
 //! <https://api.lightning.community/#signmessage>
 
 use crate::prelude::*;
-use crate::util::zbase32;
+use crate::util::base32;
 use bitcoin::hashes::{sha256d, Hash};
 use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId};
 use bitcoin::secp256k1::{Error, Message, PublicKey, Secp256k1, SecretKey};
@@ -29,118 +29,119 @@ use bitcoin::secp256k1::{Error, Message, PublicKey, Secp256k1, SecretKey};
 static LN_MESSAGE_PREFIX: &[u8] = b"Lightning Signed Message:";
 
 fn sigrec_encode(sig_rec: RecoverableSignature) -> Vec<u8> {
-    let (rid, rsig) = sig_rec.serialize_compact();
-    let prefix = rid.to_i32() as u8 + 31;
+       let (rid, rsig) = sig_rec.serialize_compact();
+       let prefix = rid.to_i32() as u8 + 31;
 
-    [&[prefix], &rsig[..]].concat()
+       [&[prefix], &rsig[..]].concat()
 }
 
 fn sigrec_decode(sig_rec: Vec<u8>) -> Result<RecoverableSignature, Error> {
-    // Signature must be 64 + 1 bytes long (compact signature + recovery id)
-    if sig_rec.len() != 65 {
-        return Err(Error::InvalidSignature);
-    }
-
-    let rsig = &sig_rec[1..];
-    let rid = sig_rec[0] as i32 - 31;
-
-    match RecoveryId::from_i32(rid) {
-        Ok(x) => RecoverableSignature::from_compact(rsig, x),
-        Err(e) => Err(e)
-    }
+       // Signature must be 64 + 1 bytes long (compact signature + recovery id)
+       if sig_rec.len() != 65 {
+               return Err(Error::InvalidSignature);
+       }
+
+       let rsig = &sig_rec[1..];
+       let rid = sig_rec[0] as i32 - 31;
+
+       match RecoveryId::from_i32(rid) {
+               Ok(x) => RecoverableSignature::from_compact(rsig, x),
+               Err(e) => Err(e)
+       }
 }
 
 /// Creates a digital signature of a message given a SecretKey, like the node's secret.
 /// A receiver knowing the PublicKey (e.g. the node's id) and the message can be sure that the signature was generated by the caller.
 /// Signatures are EC recoverable, meaning that given the message and the signature the PublicKey of the signer can be extracted.
 pub fn sign(msg: &[u8], sk: &SecretKey) -> Result<String, Error> {
-    let secp_ctx = Secp256k1::signing_only();
-    let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
+       let secp_ctx = Secp256k1::signing_only();
+       let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
 
-    let sig = secp_ctx.sign_ecdsa_recoverable(&Message::from_slice(&msg_hash)?, sk);
-    Ok(zbase32::encode(&sigrec_encode(sig)))
+       let sig = secp_ctx.sign_ecdsa_recoverable(&Message::from_slice(&msg_hash)?, sk);
+       Ok(base32::Alphabet::ZBase32.encode(&sigrec_encode(sig)))
 }
 
 /// Recovers the PublicKey of the signer of the message given the message and the signature.
 pub fn recover_pk(msg: &[u8], sig: &str) ->  Result<PublicKey, Error> {
-    let secp_ctx = Secp256k1::verification_only();
-    let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
-
-    match zbase32::decode(&sig) {
-        Ok(sig_rec) => {
-            match sigrec_decode(sig_rec) {
-                Ok(sig) => secp_ctx.recover_ecdsa(&Message::from_slice(&msg_hash)?, &sig),
-                Err(e) => Err(e)
-            }
-        },
-        Err(_) => Err(Error::InvalidSignature)
-    }
+       let secp_ctx = Secp256k1::verification_only();
+       let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
+
+       match base32::Alphabet::ZBase32.decode(&sig) {
+               Ok(sig_rec) => {
+                       match sigrec_decode(sig_rec) {
+                               Ok(sig) => secp_ctx.recover_ecdsa(&Message::from_slice(&msg_hash)?, &sig),
+                               Err(e) => Err(e)
+                       }
+               },
+               Err(_) => Err(Error::InvalidSignature)
+       }
 }
 
 /// Verifies a message was signed by a PrivateKey that derives to a given PublicKey, given a message, a signature,
 /// and the PublicKey.
 pub fn verify(msg: &[u8], sig: &str, pk: &PublicKey) -> bool {
-    match recover_pk(msg, sig) {
-        Ok(x) => x == *pk,
-        Err(_) => false
-    }
+       match recover_pk(msg, sig) {
+               Ok(x) => x == *pk,
+               Err(_) => false
+       }
 }
 
 #[cfg(test)]
 mod test {
-    use core::str::FromStr;
-    use crate::util::message_signing::{sign, recover_pk, verify};
-    use bitcoin::secp256k1::ONE_KEY;
-    use bitcoin::secp256k1::{PublicKey, Secp256k1};
-
-    #[test]
-    fn test_sign() {
-        let message = "test message";
-        let zbase32_sig = sign(message.as_bytes(), &ONE_KEY);
-
-        assert_eq!(zbase32_sig.unwrap(), "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e")
-    }
-
-    #[test]
-    fn test_recover_pk() {
-        let message = "test message";
-        let sig = "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e";
-        let pk = recover_pk(message.as_bytes(), sig);
-
-        assert_eq!(pk.unwrap(), PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY))
-    }
-
-    #[test]
-    fn test_verify() {
-        let message = "another message";
-        let sig = sign(message.as_bytes(), &ONE_KEY).unwrap();
-        let pk = PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY);
-
-        assert!(verify(message.as_bytes(), &sig, &pk))
-    }
-
-    #[test]
-    fn test_verify_ground_truth_ish() {
-        // There are no standard tests vectors for Sign/Verify, using the same tests vectors as c-lightning to see if they are compatible.
-        // Taken from https://github.com/ElementsProject/lightning/blob/1275af6fbb02460c8eb2f00990bb0ef9179ce8f3/tests/test_misc.py#L1925-L1938
-
-        let corpus = [
-            ["@bitconner",
-             "is this compatible?",
-             "rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t",
-             "02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5"],
-            ["@duck1123",
-             "hi",
-             "rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg",
-             "02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b"],
-            ["@jochemin",
-             "hi",
-             "ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3",
-             "022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931"],
-        ];
-
-        for c in &corpus {
-            assert!(verify(c[1].as_bytes(), c[2], &PublicKey::from_str(c[3]).unwrap()))
-        }
-    }
+       use core::str::FromStr;
+       use crate::util::message_signing::{sign, recover_pk, verify};
+       use bitcoin::secp256k1::ONE_KEY;
+       use bitcoin::secp256k1::{PublicKey, Secp256k1};
+
+       #[test]
+       fn test_sign() {
+               let message = "test message";
+               let zbase32_sig = sign(message.as_bytes(), &ONE_KEY);
+
+               assert_eq!(zbase32_sig.unwrap(), "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e")
+       }
+
+       #[test]
+       fn test_recover_pk() {
+               let message = "test message";
+               let sig = "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e";
+               let pk = recover_pk(message.as_bytes(), sig);
+
+               assert_eq!(pk.unwrap(), PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY))
+       }
+
+       #[test]
+       fn test_verify() {
+               let message = "another message";
+               let sig = sign(message.as_bytes(), &ONE_KEY).unwrap();
+               let pk = PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY);
+
+               assert!(verify(message.as_bytes(), &sig, &pk))
+       }
+
+       #[test]
+       fn test_verify_ground_truth_ish() {
+               // There are no standard test vectors for Sign/Verify, using the same test vectors as c-lightning to see if they are compatible.
+               // Taken from https://github.com/ElementsProject/lightning/blob/1275af6fbb02460c8eb2f00990bb0ef9179ce8f3/tests/test_misc.py#L1925-L1938
+
+               let corpus = [
+                       ["@bitconner",
+                       "is this compatible?",
+                       "rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t",
+                       "02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5"],
+                       ["@duck1123",
+                       "hi",
+                       "rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg",
+                       "02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b"],
+                       ["@jochemin",
+                       "hi",
+                       "ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3",
+                       "022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931"],
+               ];
+
+               for c in &corpus {
+                       assert!(verify(c[1].as_bytes(), c[2], &PublicKey::from_str(c[3]).unwrap()))
+               }
+       }
 }
+
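
A usage sketch for the `sign`/`recover_pk`/`verify` helpers above (not part of this commit; it assumes the module remains exported as `lightning::util::message_signing`, and the secret key is purely illustrative):

    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
    use lightning::util::message_signing::{recover_pk, sign, verify};

    fn main() {
        // Illustrative only: a fixed 32-byte secret key.
        let sk = SecretKey::from_slice(&[0x42; 32]).unwrap();
        let pk = PublicKey::from_secret_key(&Secp256k1::signing_only(), &sk);

        let msg = b"test message";
        // `sign` prefixes the message with "Lightning Signed Message:" and returns a
        // zbase32-encoded recoverable signature (now produced via `base32::Alphabet::ZBase32`).
        let sig = sign(msg, &sk).unwrap();

        // The signer's public key can be recovered from the message and signature alone...
        assert_eq!(recover_pk(msg, &sig).unwrap(), pk);
        // ...or the signature can be checked against a known key.
        assert!(verify(msg, &sig, &pk));
    }
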
index dd9d2744e1c7927ba82bab9244a355020b5d9660..e86885a83dbd0160b42bc103ecb5e737ea39bb7b 100644 (file)
@@ -22,14 +22,14 @@ pub mod invoice;
 pub mod persist;
 pub mod string;
 pub mod wakers;
+#[cfg(fuzzing)]
+pub mod base32;
+#[cfg(not(fuzzing))]
+pub(crate) mod base32;
 
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
-#[cfg(fuzzing)]
-pub mod zbase32;
-#[cfg(not(fuzzing))]
-pub(crate) mod zbase32;
 #[cfg(not(fuzzing))]
 pub(crate) mod poly1305;
 pub(crate) mod chacha20poly1305rfc;
@@ -56,5 +56,5 @@ pub mod test_utils;
 //! impls of traits that add extra enforcement on the way they're called. Useful for detecting state
 /// machine errors and used in fuzz targets and tests.
 #[cfg(any(test, feature = "_test_utils"))]
-pub mod enforcing_trait_impls;
+pub mod test_channel_signer;
 
index 435ef30d33198609b8cd38d110b1c1f9e70f7ddf..ca0605c95983afd3b370cafddaf82de2868dadfb 100644 (file)
@@ -4,13 +4,16 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! This module contains a simple key-value store trait KVStorePersister that
+//! This module contains a simple key-value store trait [`KVStore`] that
 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
 //! and [`ChannelMonitor`] all in one place.
 
 use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
+use bitcoin::hashes::hex::{FromHex, ToHex};
+use bitcoin::{BlockHash, Txid};
+
 use crate::io;
+use crate::prelude::{Vec, String};
 use crate::routing::scoring::WriteableScore;
 
 use crate::chain;
@@ -22,15 +25,93 @@ use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
 use crate::ln::channelmanager::ChannelManager;
 use crate::routing::router::Router;
 use crate::routing::gossip::NetworkGraph;
-use super::{logger::Logger, ser::Writeable};
-
-/// Trait for a key-value store for persisting some writeable object at some key
-/// Implementing `KVStorePersister` provides auto-implementations for [`Persister`]
-/// and [`Persist`] traits.  It uses "manager", "network_graph",
-/// and "monitors/{funding_txo_id}_{funding_txo_index}" for keys.
-pub trait KVStorePersister {
-       /// Persist the given writeable using the provided key
-       fn persist<W: Writeable>(&self, key: &str, object: &W) -> io::Result<()>;
+use crate::util::logger::Logger;
+use crate::util::ser::{ReadableArgs, Writeable};
+
+/// The alphabet of characters allowed for namespaces and keys.
+pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
+
+/// The maximum number of characters namespaces and keys may have.
+pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
+
+/// The namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
+
+/// The namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
+/// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
+
+/// The namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
+
+/// The namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
+
+/// Provides an interface that allows storage and retrieval of persisted values that are associated
+/// with given keys.
+///
+/// In order to avoid collisions the key space is segmented based on the given `namespace`s and
+/// `sub_namespace`s. Implementations of this trait are free to handle them in different ways, as
+/// long as per-namespace key uniqueness is asserted.
+///
+/// Keys and namespaces are required to be valid ASCII strings in the range of
+/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
+/// namespaces and sub-namespaces (`""`) are assumed to be a valid, however, if `namespace` is
+/// empty, `sub_namespace` is required to be empty, too. This means that concerns should always be
+/// separated by namespace first, before sub-namespaces are used. While the number of namespaces
+/// will be relatively small and is determined at compile time, there may be many sub-namespaces
+/// per namespace. Note that per-namespace uniqueness needs to also hold for keys *and*
+/// namespaces/sub-namespaces in any given namespace/sub-namespace, i.e., conflicts between keys
+/// and equally named namespaces/sub-namespaces must be avoided.
+///
+/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
+/// interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the
+/// data model previously assumed by `KVStorePersister::persist`.
+pub trait KVStore {
+       /// Returns the data stored for the given `namespace`, `sub_namespace`, and `key`.
+       ///
+       /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
+       /// `namespace` and `sub_namespace`.
+       ///
+       /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
+       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>>;
+       /// Persists the given data under the given `key`.
+       ///
+       /// Will create the given `namespace` and `sub_namespace` if not already present in the store.
+       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()>;
+       /// Removes any data that had previously been persisted under the given `key`.
+       ///
+       /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
+       /// remove the given `key` at some point in time after the method returns, e.g., as part of an
+       /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
+       /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
+       ///
+       /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
+       /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
+       /// potentially get lost on crash after the method returns. Therefore, this flag should only be
+       /// set for `remove` operations that can be safely replayed at a later time.
+       ///
+       /// Returns successfully if no data will be stored for the given `namespace`, `sub_namespace`, and
+       /// `key`, independently of whether it was present before its invocation or not.
+       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()>;
+       /// Returns a list of keys that are stored under the given `sub_namespace` in `namespace`.
+       ///
+       /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
+       /// returned keys. Returns an empty list if `namespace` or `sub_namespace` is unknown.
+       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>>;
 }
 
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
@@ -54,7 +135,8 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
 }
 
-impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
+
+impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
        where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
@@ -64,41 +146,107 @@ impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Dere
                R::Target: 'static + Router,
                L::Target: 'static + Logger,
 {
-       /// Persist the given ['ChannelManager'] to disk with the name "manager", returning an error if persistence failed.
+       /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
-               self.persist("manager", channel_manager)
+               self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
+                                  CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
+                                  CHANNEL_MANAGER_PERSISTENCE_KEY,
+                                  &channel_manager.encode())
        }
 
-       /// Persist the given [`NetworkGraph`] to disk with the name "network_graph", returning an error if persistence failed.
+       /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
-               self.persist("network_graph", network_graph)
+               self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
+                                  NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
+                                  NETWORK_GRAPH_PERSISTENCE_KEY,
+                                  &network_graph.encode())
        }
 
-       /// Persist the given [`WriteableScore`] to disk with name "scorer", returning an error if persistence failed.
+       /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
-               self.persist("scorer", &scorer)
+               self.write(SCORER_PERSISTENCE_NAMESPACE,
+                                  SCORER_PERSISTENCE_SUB_NAMESPACE,
+                                  SCORER_PERSISTENCE_KEY,
+                                  &scorer.encode())
        }
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStorePersister> Persist<ChannelSigner> for K {
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
        // TODO: We really need a way for the persister to inform the user that its time to crash/shut
        // down once these start returning failure.
        // A PermanentFailure implies we should probably just shut down the node since we're
        // force-closing channels without even broadcasting!
 
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               match self.persist(&key, monitor) {
+               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               match self.write(
+                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       &key, &monitor.encode())
+               {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
                        Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
                }
        }
 
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               match self.persist(&key, monitor) {
+               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               match self.write(
+                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       &key, &monitor.encode())
+               {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
                        Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
                }
        }
 }
+
+/// Read previously persisted [`ChannelMonitor`]s from the store.
+pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
+       kv_store: K, entropy_source: ES, signer_provider: SP,
+) -> io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
+where
+       K::Target: KVStore,
+       ES::Target: EntropySource + Sized,
+       SP::Target: SignerProvider + Sized,
+{
+       let mut res = Vec::new();
+
+       for stored_key in kv_store.list(
+               CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
+       {
+               let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
+                       io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
+               })?;
+
+               let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
+                       io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
+               })?;
+
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+                       &mut io::Cursor::new(
+                               kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
+                       (&*entropy_source, &*signer_provider),
+               ) {
+                       Ok((block_hash, channel_monitor)) => {
+                               if channel_monitor.get_funding_txo().0.txid != txid
+                                       || channel_monitor.get_funding_txo().0.index != index
+                               {
+                                       return Err(io::Error::new(
+                                               io::ErrorKind::InvalidData,
+                                               "ChannelMonitor was stored under the wrong key",
+                                       ));
+                               }
+                               res.push((block_hash, channel_monitor));
+                       }
+                       Err(_) => {
+                               return Err(io::Error::new(
+                                       io::ErrorKind::InvalidData,
+                                       "Failed to deserialize ChannelMonitor"
+                               ))
+                       }
+               }
+       }
+       Ok(res)
+}
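
As a sketch of a custom backend for the new `KVStore` trait (assuming the trait stays exported at `lightning::util::persist::KVStore`; namespace/key character validation and durability concerns are omitted), an in-memory store could look like this:

    use std::collections::HashMap;
    use std::io;
    use std::sync::Mutex;

    use lightning::util::persist::KVStore;

    /// Illustrative in-memory store; entries are grouped per (namespace, sub_namespace).
    struct MemoryStore {
        entries: Mutex<HashMap<(String, String), HashMap<String, Vec<u8>>>>,
    }

    impl KVStore for MemoryStore {
        fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
            let entries = self.entries.lock().unwrap();
            entries.get(&(namespace.to_string(), sub_namespace.to_string()))
                .and_then(|ns| ns.get(key).cloned())
                .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
        }

        fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
            let mut entries = self.entries.lock().unwrap();
            entries.entry((namespace.to_string(), sub_namespace.to_string()))
                .or_insert_with(HashMap::new)
                .insert(key.to_string(), buf.to_vec());
            Ok(())
        }

        fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
            // Eager removal; a real backend may defer deletion when `lazy` is set.
            let mut entries = self.entries.lock().unwrap();
            if let Some(ns) = entries.get_mut(&(namespace.to_string(), sub_namespace.to_string())) {
                ns.remove(key);
            }
            Ok(())
        }

        fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
            let entries = self.entries.lock().unwrap();
            Ok(entries.get(&(namespace.to_string(), sub_namespace.to_string()))
                .map(|ns| ns.keys().cloned().collect())
                .unwrap_or_default())
        }
    }

With such an implementation, the blanket impls above provide `Persister` and `Persist` behaviour for free, e.g. persisting the `ChannelManager` ends up as a `write("", "", "manager", ..)` call.
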
diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs
new file mode 100644 (file)
index 0000000..2fb1c49
--- /dev/null
@@ -0,0 +1,297 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
+use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
+use crate::ln::{chan_utils, msgs, PaymentPreimage};
+use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
+
+use crate::prelude::*;
+use core::cmp;
+use crate::sync::{Mutex, Arc};
+#[cfg(test)] use crate::sync::MutexGuard;
+
+use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
+use bitcoin::util::sighash;
+
+use bitcoin::secp256k1;
+use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+use crate::events::bump_transaction::HTLCDescriptor;
+use crate::util::ser::{Writeable, Writer};
+use crate::io::Error;
+use crate::ln::features::ChannelTypeFeatures;
+
+/// Initial value for revoked commitment downward counter
+pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
+
+/// An implementation of Sign that enforces some policy checks.  The current checks
+/// are an incomplete set.  They include:
+///
+/// - When signing, the holder transaction has not been revoked
+/// - When revoking, the holder transaction has not been signed
+/// - The holder commitment number is monotonic and without gaps
+/// - The revoked holder commitment number is monotonic and without gaps
+/// - There is at least one unrevoked holder transaction at all times
+/// - The counterparty commitment number is monotonic and without gaps
+/// - The pre-derived keys and pre-built transaction in CommitmentTransaction were correctly built
+///
+/// Eventually we will probably want to expose a variant of this which would essentially
+/// be what you'd want to run on a hardware wallet.
+///
+/// Note that counterparty signatures on the holder transaction are not checked, but they should
+/// be in a complete implementation.
+///
+/// Note that before we do so we should ensure its serialization format has backwards- and
+/// forwards-compatibility prefix/suffixes!
+#[derive(Clone)]
+pub struct TestChannelSigner {
+       pub inner: InMemorySigner,
+       /// Channel state used for policy enforcement
+       pub state: Arc<Mutex<EnforcementState>>,
+       pub disable_revocation_policy_check: bool,
+}
+
+impl PartialEq for TestChannelSigner {
+       fn eq(&self, o: &Self) -> bool {
+               Arc::ptr_eq(&self.state, &o.state)
+       }
+}
+
+impl TestChannelSigner {
+       /// Construct a TestChannelSigner
+       pub fn new(inner: InMemorySigner) -> Self {
+               let state = Arc::new(Mutex::new(EnforcementState::new()));
+               Self {
+                       inner,
+                       state,
+                       disable_revocation_policy_check: false
+               }
+       }
+
+       /// Construct a TestChannelSigner with externally managed storage
+       ///
+       /// Since there are multiple copies of this struct for each channel, some coordination is needed
+       /// so that all copies are aware of enforcement state.  A pointer to this state is provided
+       /// here, usually by an implementation of KeysInterface.
+       pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
+               Self {
+                       inner,
+                       state,
+                       disable_revocation_policy_check
+               }
+       }
+
+       pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features() }
+
+       #[cfg(test)]
+       pub fn get_enforcement_state(&self) -> MutexGuard<EnforcementState> {
+               self.state.lock().unwrap()
+       }
+}
+
+impl ChannelSigner for TestChannelSigner {
+       fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
+               self.inner.get_per_commitment_point(idx, secp_ctx)
+       }
+
+       fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
+               {
+                       let mut state = self.state.lock().unwrap();
+                       assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
+                       assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
+                       state.last_holder_revoked_commitment = idx;
+               }
+               self.inner.release_commitment_secret(idx)
+       }
+
+       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+               let mut state = self.state.lock().unwrap();
+               let idx = holder_tx.commitment_number();
+               assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
+               state.last_holder_commitment = idx;
+               Ok(())
+       }
+
+       fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
+
+       fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
+
+       fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
+               self.inner.provide_channel_parameters(channel_parameters)
+       }
+}
+
+impl EcdsaChannelSigner for TestChannelSigner {
+       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
+
+               {
+                       let mut state = self.state.lock().unwrap();
+                       let actual_commitment_number = commitment_tx.commitment_number();
+                       let last_commitment_number = state.last_counterparty_commitment;
+                       // These commitment numbers are backwards counting.  We expect either the same as the previously encountered,
+                       // or the next one.
+                       assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
+                       // Ensure that the counterparty doesn't get more than two broadcastable commitments -
+                       // the last and the one we are trying to sign
+                       assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
+                       state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
+               }
+
+               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
+       }
+
+       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
+               let mut state = self.state.lock().unwrap();
+               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+               state.last_counterparty_revoked_commitment = idx;
+               Ok(())
+       }
+
+       fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
+               let commitment_txid = trusted_tx.txid();
+               let holder_csv = self.inner.counterparty_selected_contest_delay();
+
+               let state = self.state.lock().unwrap();
+               let commitment_number = trusted_tx.commitment_number();
+               if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
+                       if !self.disable_revocation_policy_check {
+                               panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
+                                      state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
+                       }
+               }
+
+               for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
+                       assert!(this_htlc.transaction_output_index.is_some());
+                       let keys = trusted_tx.keys();
+                       let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.channel_type_features(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+                       let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.channel_type_features(), &keys);
+
+                       let sighash_type = if self.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+                               EcdsaSighashType::SinglePlusAnyoneCanPay
+                       } else {
+                               EcdsaSighashType::All
+                       };
+                       let sighash = hash_to_message!(
+                               &sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(
+                                       0, &htlc_redeemscript, this_htlc.amount_msat / 1000, sighash_type,
+                               ).unwrap()[..]
+                       );
+                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
+               }
+
+               Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
+       }
+
+       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+       fn unsafe_sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               Ok(self.inner.unsafe_sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
+       }
+
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
+       }
+
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
+       }
+
+       fn sign_holder_htlc_transaction(
+               &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
+               secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()> {
+               assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
+               assert_eq!(htlc_tx.output[input], htlc_descriptor.tx_output(secp_ctx));
+               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
+       }
+
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
+       }
+
+       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               closing_tx.verify(self.inner.funding_outpoint().into_bitcoin_outpoint())
+                       .expect("derived different closing transaction");
+               Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
+       }
+
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()> {
+               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
+               // As long as our minimum dust limit is enforced and is greater than our anchor output
+               // value, an anchor output can only have an index within [0, 1].
+               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
+               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
+       }
+
+       fn sign_channel_announcement_with_funding_key(
+               &self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()> {
+               self.inner.sign_channel_announcement_with_funding_key(msg, secp_ctx)
+       }
+}
+
+impl WriteableEcdsaChannelSigner for TestChannelSigner {}
+
+impl Writeable for TestChannelSigner {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+               // TestChannelSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
+               // ([`EnforcementState`]). `inner` is serialized here and deserialized by
+               // [`SignerProvider::read_chan_signer`]. `state` is managed by [`SignerProvider`]
+               // and will be serialized as needed by the implementation of that trait.
+               self.inner.write(writer)?;
+               Ok(())
+       }
+}
+
+impl TestChannelSigner {
+       fn verify_counterparty_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
+               commitment_tx.verify(&self.inner.get_channel_parameters().as_counterparty_broadcastable(),
+                                    self.inner.counterparty_pubkeys(), self.inner.pubkeys(), secp_ctx)
+                       .expect("derived different per-tx keys or built transaction")
+       }
+
+       fn verify_holder_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
+               commitment_tx.verify(&self.inner.get_channel_parameters().as_holder_broadcastable(),
+                                    self.inner.pubkeys(), self.inner.counterparty_pubkeys(), secp_ctx)
+                       .expect("derived different per-tx keys or built transaction")
+       }
+}
+
+/// The state used by [`TestChannelSigner`] in order to enforce policy checks
+///
+/// This structure is maintained by KeysInterface since we may have multiple copies of
+/// the signer and they must coordinate their state.
+#[derive(Clone)]
+pub struct EnforcementState {
+       /// The last counterparty commitment number we signed, backwards counting
+       pub last_counterparty_commitment: u64,
+       /// The last counterparty commitment they revoked, backwards counting
+       pub last_counterparty_revoked_commitment: u64,
+       /// The last holder commitment number we revoked, backwards counting
+       pub last_holder_revoked_commitment: u64,
+       /// The last validated holder commitment number, backwards counting
+       pub last_holder_commitment: u64,
+}
+
+impl EnforcementState {
+       /// Enforcement state for a new channel
+       pub fn new() -> Self {
+               EnforcementState {
+                       last_counterparty_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+               }
+       }
+}
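
The commitment counters in `EnforcementState` count downward from `INITIAL_REVOKED_COMMITMENT_NUMBER` (1 << 48). A standalone sketch (not the crate's code) of the "same or next, no gaps" rule the signer asserts throughout:

    /// Illustrative copy of INITIAL_REVOKED_COMMITMENT_NUMBER.
    const INITIAL: u64 = 1 << 48;

    /// Commitment numbers count downwards, so "same or next" means equal or one less.
    fn is_same_or_next(last: u64, next: u64) -> bool {
        next == last || next == last - 1
    }

    fn main() {
        let mut last_counterparty_commitment = INITIAL;

        // Signing the first counterparty commitment (INITIAL - 1) is allowed...
        assert!(is_same_or_next(last_counterparty_commitment, INITIAL - 1));
        last_counterparty_commitment = INITIAL - 1;

        // ...as is re-signing the same commitment number...
        assert!(is_same_or_next(last_counterparty_commitment, INITIAL - 1));

        // ...but skipping ahead would violate the "monotonic and without gaps" policy.
        assert!(!is_same_or_next(last_counterparty_commitment, INITIAL - 3));
    }
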
index 8cd95f62a930243adfb6d330fb28e6536651b8aa..8e2be87d8bef820b4bf56b19829aa09450a9f1d6 100644 (file)
@@ -35,9 +35,10 @@ use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParame
 use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
 use crate::sync::RwLock;
 use crate::util::config::UserConfig;
-use crate::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use crate::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use crate::util::logger::{Logger, Level, Record};
 use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
+use crate::util::persist::KVStore;
 
 use bitcoin::EcdsaSighashType;
 use bitcoin::blockdata::constants::ChainHash;
@@ -175,7 +176,7 @@ impl EntropySource for OnlyReadsKeysInterface {
        fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }}
 
 impl SignerProvider for OnlyReadsKeysInterface {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!(); }
 
@@ -185,7 +186,7 @@ impl SignerProvider for OnlyReadsKeysInterface {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        false
@@ -197,10 +198,10 @@ impl SignerProvider for OnlyReadsKeysInterface {
 }
 
 pub struct TestChainMonitor<'a> {
-       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingSigner>)>>,
+       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
        pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
        pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<EnforcingSigner>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
@@ -208,7 +209,7 @@ pub struct TestChainMonitor<'a> {
        pub expect_channel_force_closed: Mutex<Option<(ChannelId, bool)>>,
 }
 impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        monitor_updates: Mutex::new(HashMap::new()),
@@ -224,13 +225,13 @@ impl<'a> TestChainMonitor<'a> {
                self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
        }
 }
-impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
+impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
@@ -264,7 +265,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap();
                w.0.clear();
                monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == *monitor);
                self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
@@ -425,6 +426,97 @@ impl<Signer: sign::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> fo
        }
 }
 
+pub(crate) struct TestStore {
+       persisted_bytes: Mutex<HashMap<String, HashMap<String, Vec<u8>>>>,
+       read_only: bool,
+}
+
+impl TestStore {
+       pub fn new(read_only: bool) -> Self {
+               let persisted_bytes = Mutex::new(HashMap::new());
+               Self { persisted_bytes, read_only }
+       }
+}
+
+impl KVStore for TestStore {
+       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
+               let persisted_lock = self.persisted_bytes.lock().unwrap();
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+
+               if let Some(outer_ref) = persisted_lock.get(&prefixed) {
+                       if let Some(inner_ref) = outer_ref.get(key) {
+                               let bytes = inner_ref.clone();
+                               Ok(bytes)
+                       } else {
+                               Err(io::Error::new(io::ErrorKind::NotFound, "Key not found"))
+                       }
+               } else {
+                       Err(io::Error::new(io::ErrorKind::NotFound, "Namespace not found"))
+               }
+       }
+
+       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
+               if self.read_only {
+                       return Err(io::Error::new(
+                               io::ErrorKind::PermissionDenied,
+                               "Cannot modify read-only store",
+                       ));
+               }
+               let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+               let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+               let mut bytes = Vec::new();
+               bytes.write_all(buf)?;
+               outer_e.insert(key.to_string(), bytes);
+               Ok(())
+       }
+
+       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
+               if self.read_only {
+                       return Err(io::Error::new(
+                               io::ErrorKind::PermissionDenied,
+                               "Cannot modify read-only store",
+                       ));
+               }
+
+               let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+               if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
+                               outer_ref.remove(&key.to_string());
+               }
+
+               Ok(())
+       }
+
+       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
+               let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+               let prefixed = if sub_namespace.is_empty() {
+                       namespace.to_string()
+               } else {
+                       format!("{}/{}", namespace, sub_namespace)
+               };
+               match persisted_lock.entry(prefixed) {
+                       hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
+                       hash_map::Entry::Vacant(_) => Ok(Vec::new()),
+               }
+       }
+}
+
 pub struct TestBroadcaster {
        pub txn_broadcasted: Mutex<Vec<Transaction>>,
        pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
@@ -979,16 +1071,16 @@ impl NodeSigner for TestKeysInterface {
 }
 
 impl SignerProvider for TestKeysInterface {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
                self.backing.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> EnforcingSigner {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
                let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
                let state = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+               TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
@@ -997,7 +1089,7 @@ impl SignerProvider for TestKeysInterface {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        self.disable_revocation_policy_check
@@ -1038,10 +1130,10 @@ impl TestKeysInterface {
                self
        }
 
-       pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> EnforcingSigner {
+       pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> TestChannelSigner {
                let keys = self.backing.derive_channel_keys(channel_value_satoshis, id);
                let state = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+               TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
        fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
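The hunks above are largely a rename of `EnforcingSigner` to `TestChannelSigner`, but they also show the pattern of handing every re-derived signer the same enforcement-state cell, keyed by `commitment_seed`. A standalone sketch of that sharing pattern, using hypothetical stand-in types rather than the real LDK ones:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Hypothetical stand-ins; the real LDK types carry much more state.
#[derive(Default)]
struct EnforcementState { last_commitment_number: Option<u64> }

#[derive(Default)]
struct SignerFactory {
    // One state cell per commitment seed, so every signer re-derived for the same
    // channel observes the same enforcement history (as make_enforcement_state_cell does).
    states: Mutex<HashMap<[u8; 32], Arc<Mutex<EnforcementState>>>>,
}

impl SignerFactory {
    fn state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
        let mut states = self.states.lock().unwrap();
        states.entry(commitment_seed).or_default().clone()
    }
}

fn main() {
    let factory = SignerFactory::default();
    let a = factory.state_cell([7u8; 32]);
    let b = factory.state_cell([7u8; 32]);
    a.lock().unwrap().last_commitment_number = Some(42);
    // Both handles point at the same shared state.
    assert_eq!(b.lock().unwrap().last_commitment_number, Some(42));
}
```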
diff --git a/lightning/src/util/zbase32.rs b/lightning/src/util/zbase32.rs
deleted file mode 100644 (file)
index 8a7bf35..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-// This is a modification of base32 encoding to support the zbase32 alphabet.
-// The original piece of software can be found at https://github.com/andreasots/base32
-// The original portions of this software are Copyright (c) 2015 The base32 Developers
-
-/* This file is licensed under either of
- *  Apache License, Version 2.0, (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
- *  MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT)
- * at your option.
-*/
-
-use crate::prelude::*;
-
-const ALPHABET: &'static [u8] = b"ybndrfg8ejkmcpqxot1uwisza345h769";
-
-/// Encodes some bytes as a zbase32 string
-pub fn encode(data: &[u8]) -> String {
-       let mut ret = Vec::with_capacity((data.len() + 4) / 5 * 8);
-
-       for chunk in data.chunks(5) {
-               let buf = {
-                       let mut buf = [0u8; 5];
-                       for (i, &b) in chunk.iter().enumerate() {
-                               buf[i] = b;
-                       }
-                       buf
-               };
-
-               ret.push(ALPHABET[((buf[0] & 0xF8) >> 3) as usize]);
-               ret.push(ALPHABET[(((buf[0] & 0x07) << 2) | ((buf[1] & 0xC0) >> 6)) as usize]);
-               ret.push(ALPHABET[((buf[1] & 0x3E) >> 1) as usize]);
-               ret.push(ALPHABET[(((buf[1] & 0x01) << 4) | ((buf[2] & 0xF0) >> 4)) as usize]);
-               ret.push(ALPHABET[(((buf[2] & 0x0F) << 1) | (buf[3] >> 7)) as usize]);
-               ret.push(ALPHABET[((buf[3] & 0x7C) >> 2) as usize]);
-               ret.push(ALPHABET[(((buf[3] & 0x03) << 3) | ((buf[4] & 0xE0) >> 5)) as usize]);
-               ret.push(ALPHABET[(buf[4] & 0x1F) as usize]);
-       }
-
-       ret.truncate((data.len() * 8 + 4) / 5);
-
-       // Check that our capacity calculation doesn't under-shoot in fuzzing
-       #[cfg(fuzzing)]
-       assert_eq!(ret.capacity(), (data.len() + 4) / 5 * 8);
-
-       String::from_utf8(ret).unwrap()
-}
-
-// ASCII 0-Z
-const INV_ALPHABET: [i8; 43] = [
-       -1, 18, -1, 25, 26, 27, 30, 29, 7, 31, -1, -1, -1, -1, -1, -1, -1,  24, 1, 12, 3, 8, 5, 6, 28,
-       21, 9, 10, -1, 11, 2, 16, 13, 14, 4, 22, 17, 19, -1, 20, 15, 0, 23,
-];
-
-/// Decodes a zbase32 string to the original bytes, failing if the string was not encoded by a
-/// proper zbase32 encoder.
-pub fn decode(data: &str) -> Result<Vec<u8>, ()> {
-       if !data.is_ascii() {
-               return Err(());
-       }
-
-       let data = data.as_bytes();
-       let output_length = data.len() * 5 / 8;
-       if data.len() > (output_length * 8 + 4) / 5 {
-               // If the string has more characters than are required to encode the number of bytes
-               // decodable, treat the string as invalid.
-               return Err(());
-       }
-
-       let mut ret = Vec::with_capacity((data.len() + 7) / 8 * 5);
-
-       for chunk in data.chunks(8) {
-               let buf = {
-                       let mut buf = [0u8; 8];
-                       for (i, &c) in chunk.iter().enumerate() {
-                               match INV_ALPHABET.get(c.to_ascii_uppercase().wrapping_sub(b'0') as usize) {
-                                       Some(&-1) | None => return Err(()),
-                                       Some(&value) => buf[i] = value as u8,
-                               };
-                       }
-                       buf
-               };
-               ret.push((buf[0] << 3) | (buf[1] >> 2));
-               ret.push((buf[1] << 6) | (buf[2] << 1) | (buf[3] >> 4));
-               ret.push((buf[3] << 4) | (buf[4] >> 1));
-               ret.push((buf[4] << 7) | (buf[5] << 2) | (buf[6] >> 3));
-               ret.push((buf[6] << 5) | buf[7]);
-       }
-       for c in ret.drain(output_length..) {
-               if c != 0 {
-                       // If the original string had any bits set at positions outside of the encoded data,
-                       // treat the string as invalid.
-                       return Err(());
-               }
-       }
-
-       // Check that our capacity calculation doesn't under-shoot in fuzzing
-       #[cfg(fuzzing)]
-       assert_eq!(ret.capacity(), (data.len() + 7) / 8 * 5);
-
-       Ok(ret)
-}
-
-#[cfg(test)]
-mod tests {
-       use super::*;
-
-       const TEST_DATA: &[(&str, &[u8])] = &[
-               ("",       &[]),
-               ("yy",   &[0x00]),
-               ("oy",   &[0x80]),
-               ("tqrey",   &[0x8b, 0x88, 0x80]),
-               ("6n9hq",  &[0xf0, 0xbf, 0xc7]),
-               ("4t7ye",  &[0xd4, 0x7a, 0x04]),
-               ("6im5sdy", &[0xf5, 0x57, 0xbb, 0x0c]),
-               ("ybndrfg8ejkmcpqxot1uwisza345h769", &[0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6,
-                                                                                                       0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, 0xd7, 0xc6,
-                                                                                                       0x75, 0xbe, 0x77, 0xdf])
-       ];
-
-       #[test]
-       fn test_encode() {
-               for &(zbase32, data) in TEST_DATA {
-                       assert_eq!(encode(data), zbase32);
-               }
-       }
-
-       #[test]
-       fn test_decode() {
-               for &(zbase32, data) in TEST_DATA {
-                       assert_eq!(decode(zbase32).unwrap(), data);
-               }
-       }
-
-       #[test]
-       fn test_decode_wrong() {
-               const WRONG_DATA: &[&str] = &["00", "l1", "?", "="];
-
-               for &data in WRONG_DATA {
-                       match decode(data) {
-                               Ok(_) => assert!(false, "Data shouldn't be decodable"),
-                               Err(_) => assert!(true),
-                       }
-               }
-       }
-}
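For reference, the removed `zbase32` module's length arithmetic: each 5-byte chunk expands to 8 five-bit symbols, and the result is truncated to `ceil(len * 8 / 5)` characters. A small standalone sketch (not LDK code) checking that the scratch-buffer formula never under-shoots the truncated length:

```rust
// Illustrative only: the buffer-capacity and output-length formulas used by the
// removed zbase32 encoder above.
fn main() {
    for len in 0usize..=10 {
        let chunk_capacity = (len + 4) / 5 * 8;  // symbols produced per whole 5-byte chunk
        let encoded_chars = (len * 8 + 4) / 5;   // symbols kept after truncation
        assert!(encoded_chars <= chunk_capacity);
        println!("{:2} bytes -> {:2} zbase32 chars (scratch buffer of {:2})",
            len, encoded_chars, chunk_capacity);
    }
}
```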
diff --git a/pending_changelog/invoice_request_failed_downgrade.txt b/pending_changelog/invoice_request_failed_downgrade.txt
new file mode 100644 (file)
index 0000000..d701cef
--- /dev/null
@@ -0,0 +1,3 @@
+## Backwards Compatibility
+
+* If an `Event::InvoiceRequestFailed` was generated for a BOLT 12 payment (#2371), downgrading will result in the payment silently failing if the event had not been processed yet.
diff --git a/pending_changelog/kvstore.txt b/pending_changelog/kvstore.txt
new file mode 100644 (file)
index 0000000..d96fd69
--- /dev/null
@@ -0,0 +1,3 @@
+## Backwards Compatibility
+
+* Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
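A minimal sketch, using a hypothetical helper name, of the concatenation rule quoted above for recovering a legacy `KVStorePersister`-style key from the new triple:

```rust
// Hypothetical helper, not part of LDK: builds the pre-0.0.117 flat key from the
// new (namespace, sub_namespace, key) triple, per the rule quoted above.
fn legacy_key(namespace: &str, sub_namespace: &str, key: &str) -> String {
    match (namespace.is_empty(), sub_namespace.is_empty()) {
        (true, _) => key.to_string(),
        (false, true) => format!("{}/{}", namespace, key),
        (false, false) => format!("{}/{}/{}", namespace, sub_namespace, key),
    }
}

fn main() {
    assert_eq!(legacy_key("", "", "manager"), "manager");
    assert_eq!(legacy_key("monitors", "", "deadbeef_1"), "monitors/deadbeef_1");
    assert_eq!(legacy_key("monitors", "archived", "deadbeef_1"), "monitors/archived/deadbeef_1");
}
```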
diff --git a/pending_changelog/move_netaddress_to_socketaddress.txt b/pending_changelog/move_netaddress_to_socketaddress.txt
new file mode 100644 (file)
index 0000000..5153ed1
--- /dev/null
@@ -0,0 +1 @@
+* The `NetAddress` type has been renamed to `SocketAddress`. Its `IPv4` and `IPv6` variants have also been renamed to `TcpIpV4` and `TcpIpV6` (#2358).
diff --git a/pending_changelog/new_channel_id_type_pr_2485.txt b/pending_changelog/new_channel_id_type_pr_2485.txt
new file mode 100644 (file)
index 0000000..4ae3c2c
--- /dev/null
@@ -0,0 +1 @@
+* In several APIs, `channel_id` parameters have been changed from `[u8; 32]` to the newly introduced `ChannelId` type in the `ln` namespace (`lightning::ln::ChannelId`) (PR #2485).
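A hypothetical sketch (the actual `lightning::ln::ChannelId` API may differ) of why a dedicated newtype is preferable to a bare `[u8; 32]`: the compiler now rejects passing arbitrary 32-byte arrays where a channel id is expected.

```rust
// Hypothetical definition for illustration only.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ChannelId([u8; 32]);

impl ChannelId {
    fn from_bytes(data: [u8; 32]) -> Self { ChannelId(data) }
}

fn close_channel(id: ChannelId) {
    println!("closing {:02x?}", id.0);
}

fn main() {
    let raw = [0u8; 32];
    // A bare array no longer type-checks as a channel id; callers convert explicitly.
    close_channel(ChannelId::from_bytes(raw));
}
```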
diff --git a/pending_changelog/routes_route_params.txt b/pending_changelog/routes_route_params.txt
new file mode 100644 (file)
index 0000000..e88a1c7
--- /dev/null
@@ -0,0 +1,3 @@
+## Backwards Compatibility
+
+* `Route` objects written with LDK versions prior to 0.0.117 won't be retryable after being deserialized with LDK 0.0.117 or above.