Merge pull request #1500 from arik-so/2022-05-network-graph-rapid-sync-timestamp
author Arik Sosman <arik-so@users.noreply.github.com>
Tue, 31 May 2022 18:57:18 +0000 (11:57 -0700)
committer GitHub <noreply@github.com>
Tue, 31 May 2022 18:57:18 +0000 (11:57 -0700)
Add optional last_rapid_gossip_sync_timestamp field to NetworkGraph to enable optimized differential rapid syncing.
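For context, the idea behind the new field is that a client records the timestamp of the last rapid-gossip-sync snapshot it applied to its NetworkGraph, and on the next sync requests only the changes since that timestamp rather than a full graph dump. The sketch below is a minimal standalone illustration of that pattern; the ToyNetworkGraph type, the accessor names, and the URL scheme are assumptions for illustration only, not the actual rust-lightning API introduced by this PR.

    // Minimal standalone sketch (not the actual rust-lightning API): shows how an
    // optional last-sync timestamp on a graph struct can drive differential syncing.

    /// Toy stand-in for NetworkGraph, holding only the new optional timestamp.
    struct ToyNetworkGraph {
        last_rapid_gossip_sync_timestamp: Option<u32>,
    }

    impl ToyNetworkGraph {
        fn new() -> Self {
            Self { last_rapid_gossip_sync_timestamp: None }
        }

        /// Returns the timestamp of the last applied rapid gossip sync snapshot, if any.
        fn get_last_rapid_gossip_sync_timestamp(&self) -> Option<u32> {
            self.last_rapid_gossip_sync_timestamp
        }

        /// Records the timestamp carried by the snapshot that was just applied.
        fn set_last_rapid_gossip_sync_timestamp(&mut self, timestamp: u32) {
            self.last_rapid_gossip_sync_timestamp = Some(timestamp);
        }
    }

    /// Decide which snapshot to fetch: a full dump on first sync, or a delta since
    /// the last applied snapshot on subsequent syncs. The URL layout is hypothetical.
    fn snapshot_url(graph: &ToyNetworkGraph, base: &str) -> String {
        match graph.get_last_rapid_gossip_sync_timestamp() {
            Some(ts) => format!("{}/{}", base, ts), // differential update since `ts`
            None => format!("{}/0", base),          // full graph snapshot
        }
    }

    fn main() {
        let mut graph = ToyNetworkGraph::new();
        assert_eq!(snapshot_url(&graph, "https://example.org/snapshot"),
                   "https://example.org/snapshot/0");
        // After applying a snapshot whose data carried timestamp 1654041600:
        graph.set_last_rapid_gossip_sync_timestamp(1654041600);
        assert_eq!(snapshot_url(&graph, "https://example.org/snapshot"),
                   "https://example.org/snapshot/1654041600");
    }

With such a field persisted alongside the graph, a node that restarts can resume differential syncing immediately instead of re-downloading the entire gossip snapshot.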

37 files changed:
fuzz/src/bin/gen_target.sh
fuzz/src/bin/msg_channel_ready_target.rs [new file with mode: 0644]
fuzz/src/bin/msg_funding_locked_target.rs [deleted file]
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/msg_targets/gen_target.sh
fuzz/src/msg_targets/mod.rs
fuzz/src/msg_targets/msg_channel_ready.rs [new file with mode: 0644]
fuzz/src/msg_targets/msg_funding_locked.rs [deleted file]
fuzz/src/router.rs
fuzz/targets.h
lightning-background-processor/src/lib.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/src/lib.rs
lightning-rapid-gossip-sync/Cargo.toml
lightning-rapid-gossip-sync/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/ln/wire.rs
lightning/src/routing/router.rs
lightning/src/util/config.rs
lightning/src/util/events.rs
lightning/src/util/scid_utils.rs
lightning/src/util/test_utils.rs

index 72fefe51609103c7ed7c8cbf74ed2b33f6b9552f..b88b72503779d72854fbbb213a77b0f82e474336 100755 (executable)
@@ -21,7 +21,7 @@ GEN_TEST msg_closing_signed msg_targets::
 GEN_TEST msg_commitment_signed msg_targets::
 GEN_TEST msg_decoded_onion_error_packet msg_targets::
 GEN_TEST msg_funding_created msg_targets::
-GEN_TEST msg_funding_locked msg_targets::
+GEN_TEST msg_channel_ready msg_targets::
 GEN_TEST msg_funding_signed msg_targets::
 GEN_TEST msg_init msg_targets::
 GEN_TEST msg_open_channel msg_targets::
diff --git a/fuzz/src/bin/msg_channel_ready_target.rs b/fuzz/src/bin/msg_channel_ready_target.rs
new file mode 100644 (file)
index 0000000..d11068b
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::msg_targets::msg_channel_ready::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               msg_channel_ready_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       msg_channel_ready_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       msg_channel_ready_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       msg_channel_ready_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               msg_channel_ready_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/msg_channel_ready") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               msg_channel_ready_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
diff --git a/fuzz/src/bin/msg_funding_locked_target.rs b/fuzz/src/bin/msg_funding_locked_target.rs
deleted file mode 100644 (file)
index 986f7f8..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-// This file is auto-generated by gen_target.sh based on target_template.txt
-// To modify it, modify target_template.txt and run gen_target.sh instead.
-
-#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
-
-#[cfg(not(fuzzing))]
-compile_error!("Fuzz targets need cfg=fuzzing");
-
-extern crate lightning_fuzz;
-use lightning_fuzz::msg_targets::msg_funding_locked::*;
-
-#[cfg(feature = "afl")]
-#[macro_use] extern crate afl;
-#[cfg(feature = "afl")]
-fn main() {
-       fuzz!(|data| {
-               msg_funding_locked_run(data.as_ptr(), data.len());
-       });
-}
-
-#[cfg(feature = "honggfuzz")]
-#[macro_use] extern crate honggfuzz;
-#[cfg(feature = "honggfuzz")]
-fn main() {
-       loop {
-               fuzz!(|data| {
-                       msg_funding_locked_run(data.as_ptr(), data.len());
-               });
-       }
-}
-
-#[cfg(feature = "libfuzzer_fuzz")]
-#[macro_use] extern crate libfuzzer_sys;
-#[cfg(feature = "libfuzzer_fuzz")]
-fuzz_target!(|data: &[u8]| {
-       msg_funding_locked_run(data.as_ptr(), data.len());
-});
-
-#[cfg(feature = "stdin_fuzz")]
-fn main() {
-       use std::io::Read;
-
-       let mut data = Vec::with_capacity(8192);
-       std::io::stdin().read_to_end(&mut data).unwrap();
-       msg_funding_locked_run(data.as_ptr(), data.len());
-}
-
-#[test]
-fn run_test_cases() {
-       use std::fs;
-       use std::io::Read;
-       use lightning_fuzz::utils::test_logger::StringBuffer;
-
-       use std::sync::{atomic, Arc};
-       {
-               let data: Vec<u8> = vec![0];
-               msg_funding_locked_run(data.as_ptr(), data.len());
-       }
-       let mut threads = Vec::new();
-       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
-       if let Ok(tests) = fs::read_dir("test_cases/msg_funding_locked") {
-               for test in tests {
-                       let mut data: Vec<u8> = Vec::new();
-                       let path = test.unwrap().path();
-                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
-                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
-
-                       let thread_count_ref = Arc::clone(&threads_running);
-                       let main_thread_ref = std::thread::current();
-                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
-                               std::thread::spawn(move || {
-                                       let string_logger = StringBuffer::new();
-
-                                       let panic_logger = string_logger.clone();
-                                       let res = if ::std::panic::catch_unwind(move || {
-                                               msg_funding_locked_test(&data, panic_logger);
-                                       }).is_err() {
-                                               Some(string_logger.into_string())
-                                       } else { None };
-                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
-                                       main_thread_ref.unpark();
-                                       res
-                               })
-                       ));
-                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
-                               std::thread::park();
-                       }
-               }
-       }
-       let mut failed_outputs = Vec::new();
-       for (test, thread) in threads.drain(..) {
-               if let Some(output) = thread.join().unwrap() {
-                       println!("\nOutput of {}:\n{}\n", test, output);
-                       failed_outputs.push(test);
-               }
-       }
-       if !failed_outputs.is_empty() {
-               println!("Test cases which failed: ");
-               for case in failed_outputs {
-                       println!("{}", case);
-               }
-               panic!();
-       }
-}
index 1b662c0e721c05b49b55bd65a35af52026988481..9625677c263378f3a9eed66be03e78989def25d4 100644 (file)
@@ -490,10 +490,10 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        }
                        for (idx, node_event) in node_events.iter().enumerate() {
                                for event in node_event {
-                                       if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
+                                       if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event {
                                                for node in $nodes.iter() {
                                                        if node.get_our_node_id() == *node_id {
-                                                               node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg);
+                                                               node.handle_channel_ready(&$nodes[idx].get_our_node_id(), msg);
                                                        }
                                                }
                                        } else { panic!("Wrong event type"); }
@@ -597,7 +597,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                                        if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); }
                                                        *node_id == a_id
                                                },
-                                               events::MessageSendEvent::SendFundingLocked { .. } => continue,
+                                               events::MessageSendEvent::SendChannelReady { .. } => continue,
                                                events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue,
                                                events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
                                                        assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
@@ -725,7 +725,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                                                }
                                                        }
                                                },
-                                               events::MessageSendEvent::SendFundingLocked { .. } => {
+                                               events::MessageSendEvent::SendChannelReady { .. } => {
                                                        // Can be generated as a reestablish response
                                                },
                                                events::MessageSendEvent::SendAnnouncementSignatures { .. } => {
@@ -771,7 +771,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                                        events::MessageSendEvent::UpdateHTLCs { .. } => {},
                                                        events::MessageSendEvent::SendRevokeAndACK { .. } => {},
                                                        events::MessageSendEvent::SendChannelReestablish { .. } => {},
-                                                       events::MessageSendEvent::SendFundingLocked { .. } => {},
+                                                       events::MessageSendEvent::SendChannelReady { .. } => {},
                                                        events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
                                                        events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                                                                assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
@@ -792,7 +792,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                                        events::MessageSendEvent::UpdateHTLCs { .. } => {},
                                                        events::MessageSendEvent::SendRevokeAndACK { .. } => {},
                                                        events::MessageSendEvent::SendChannelReestablish { .. } => {},
-                                                       events::MessageSendEvent::SendFundingLocked { .. } => {},
+                                                       events::MessageSendEvent::SendChannelReady { .. } => {},
                                                        events::MessageSendEvent::SendAnnouncementSignatures { .. } => {},
                                                        events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => {
                                                                assert_eq!(msg.contents.flags & 2, 0); // The disable bit must never be set!
@@ -840,13 +840,14 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                                                events::Event::PaymentReceived { payment_hash, .. } => {
                                                        if claim_set.insert(payment_hash.0) {
                                                                if $fail {
-                                                                       assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
+                                                                       nodes[$node].fail_htlc_backwards(&payment_hash);
                                                                } else {
-                                                                       assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+                                                                       nodes[$node].claim_funds(PaymentPreimage(payment_hash.0));
                                                                }
                                                        }
                                                },
                                                events::Event::PaymentSent { .. } => {},
+                                               events::Event::PaymentClaimed { .. } => {},
                                                events::Event::PaymentPathSuccessful { .. } => {},
                                                events::Event::PaymentPathFailed { .. } => {},
                                                events::Event::PaymentForwarded { .. } if $node == 1 => {},
index e3c8290b16ea820e4a26bf585a5684384f1f46c3..00b123b84dd1942da7f20d1be5fdf10f2cd8a1fd 100644 (file)
@@ -739,12 +739,12 @@ mod tests {
                // 0c0000 - connect a block with no transactions
                // 0c0000 - connect a block with no transactions
                // 0c0000 - connect a block with no transactions
-               // - by now client should have sent a funding_locked (CHECK 3: SendFundingLocked to 03000000 for chan 3d000000)
+               // - by now client should have sent a channel_ready (CHECK 3: SendChannelReady to 03000000 for chan 3d000000)
                //
                // 030012 - inbound read from peer id 0 of len 18
                // 0043 03000000000000000000000000000000 - message header indicating message length 67
                // 030053 - inbound read from peer id 0 of len 83
-               // 0024 3d00000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - funding_locked and mac
+               // 0024 3d00000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - channel_ready and mac
                //
                // 01 - new inbound connection with id 1
                // 030132 - inbound read from peer id 1 of len 50
@@ -775,12 +775,12 @@ mod tests {
                // 0023 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_signed message and mac
                //
                // 0b - broadcast funding transaction
-               // - by now client should have sent a funding_locked (CHECK 4: SendFundingLocked to 03020000 for chan 3f000000)
+               // - by now client should have sent a channel_ready (CHECK 4: SendChannelReady to 03020000 for chan 3f000000)
                //
                // 030112 - inbound read from peer id 1 of len 18
                // 0043 01000000000000000000000000000000 - message header indicating message length 67
                // 030153 - inbound read from peer id 1 of len 83
-               // 0024 3a00000000000000000000000000000000000000000000000000000000000000 026700000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_locked and mac
+               // 0024 3a00000000000000000000000000000000000000000000000000000000000000 026700000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - channel_ready and mac
                //
                // 030012 - inbound read from peer id 0 of len 18
                // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
@@ -964,8 +964,8 @@ mod tests {
                let log_entries = logger.lines.lock().unwrap();
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingSigned event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 2
-               assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 3
-               assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 4
+               assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendChannelReady event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 3
+               assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendChannelReady event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 4
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendRevokeAndACK event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&4)); // 5
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 6
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7
index 0e930cfa1ac7b85b1f871c0bc745bdb752e128eb..848793db50082b20cbe1c520dd430bde30466140 100755 (executable)
@@ -16,7 +16,7 @@ GEN_TEST AnnouncementSignatures test_msg_simple ""
 GEN_TEST ClosingSigned test_msg_simple ""
 GEN_TEST CommitmentSigned test_msg_simple ""
 GEN_TEST FundingCreated test_msg_simple ""
-GEN_TEST FundingLocked test_msg_simple ""
+GEN_TEST ChannelReady test_msg_simple ""
 GEN_TEST FundingSigned test_msg_simple ""
 GEN_TEST GossipTimestampFilter test_msg_simple ""
 GEN_TEST Init test_msg_simple ""
index 8acb690b04bf92a0f1df4028fbc6f00eef0cc8b1..dd1540ce51ec51f21c3bdc28fed92d31bd194127 100644 (file)
@@ -4,7 +4,7 @@ pub mod msg_announcement_signatures;
 pub mod msg_closing_signed;
 pub mod msg_commitment_signed;
 pub mod msg_funding_created;
-pub mod msg_funding_locked;
+pub mod msg_channel_ready;
 pub mod msg_funding_signed;
 pub mod msg_gossip_timestamp_filter;
 pub mod msg_init;
diff --git a/fuzz/src/msg_targets/msg_channel_ready.rs b/fuzz/src/msg_targets/msg_channel_ready.rs
new file mode 100644 (file)
index 0000000..e5f3b25
--- /dev/null
@@ -0,0 +1,27 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on msg_target_template.txt
+// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+
+use lightning::ln::msgs;
+
+use msg_targets::utils::VecWriter;
+use utils::test_logger;
+
+#[inline]
+pub fn msg_channel_ready_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       test_msg_simple!(msgs::ChannelReady, data);
+}
+
+#[no_mangle]
+pub extern "C" fn msg_channel_ready_run(data: *const u8, datalen: usize) {
+       let data = unsafe { std::slice::from_raw_parts(data, datalen) };
+       test_msg_simple!(msgs::ChannelReady, data);
+}
diff --git a/fuzz/src/msg_targets/msg_funding_locked.rs b/fuzz/src/msg_targets/msg_funding_locked.rs
deleted file mode 100644 (file)
index 2c6ad63..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-// This file is auto-generated by gen_target.sh based on msg_target_template.txt
-// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
-
-use lightning::ln::msgs;
-
-use msg_targets::utils::VecWriter;
-use utils::test_logger;
-
-#[inline]
-pub fn msg_funding_locked_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
-       test_msg_simple!(msgs::FundingLocked, data);
-}
-
-#[no_mangle]
-pub extern "C" fn msg_funding_locked_run(data: *const u8, datalen: usize) {
-       let data = unsafe { std::slice::from_raw_parts(data, datalen) };
-       test_msg_simple!(msgs::FundingLocked, data);
-}
index 786bfa3e589eb05dc393a807be90efc46c5febf7..bb6ba2c6e51cb0a2ac65d86023279c08954532b9 100644 (file)
@@ -222,12 +222,13 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                                channel_type: None,
                                                                short_channel_id: Some(scid),
                                                                inbound_scid_alias: None,
+                                                               outbound_scid_alias: None,
                                                                channel_value_satoshis: capacity,
                                                                user_channel_id: 0, inbound_capacity_msat: 0,
                                                                unspendable_punishment_reserve: None,
                                                                confirmations_required: None,
                                                                force_close_spend_delay: None,
-                                                               is_outbound: true, is_funding_locked: true,
+                                                               is_outbound: true, is_channel_ready: true,
                                                                is_usable: true, is_public: true,
                                                                balance_msat: 0,
                                                                outbound_capacity_msat: capacity.saturating_mul(1000),
index 798fb66479519cf9b00a84ce3d5873787c449b58..d142aa6487eb36e8e436457be951d84771162f8f 100644 (file)
@@ -13,7 +13,7 @@ void msg_closing_signed_run(const unsigned char* data, size_t data_len);
 void msg_commitment_signed_run(const unsigned char* data, size_t data_len);
 void msg_decoded_onion_error_packet_run(const unsigned char* data, size_t data_len);
 void msg_funding_created_run(const unsigned char* data, size_t data_len);
-void msg_funding_locked_run(const unsigned char* data, size_t data_len);
+void msg_channel_ready_run(const unsigned char* data, size_t data_len);
 void msg_funding_signed_run(const unsigned char* data, size_t data_len);
 void msg_init_run(const unsigned char* data, size_t data_len);
 void msg_open_channel_run(const unsigned char* data, size_t data_len);
index dab376178488654d70322155400c0bd263b740a4..95c753bca80521ef9bf203b00f94d477881ec386 100644 (file)
@@ -759,12 +759,12 @@ mod tests {
 
                // Confirm the funding transaction.
                confirm_transaction(&mut nodes[0], &funding_tx);
-               let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
+               let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
                confirm_transaction(&mut nodes[1], &funding_tx);
-               let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
-               nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
+               let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
                let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
-               nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
+               nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
                let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 
                assert!(bg_processor.stop().is_ok());
index 9b3b41afbedae5f938c01968f221a843870d376c..9f6691564ca64399b9e3299096783248187bc043 100644 (file)
@@ -672,10 +672,10 @@ mod test {
                connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
                confirm_transaction_at(&nodes[0], &tx, conf_height);
                connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
-               let as_funding_locked = get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
-               nodes[2].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id()));
+               let as_channel_ready = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
+               nodes[2].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[2].node.get_our_node_id()));
                get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
-               nodes[0].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &as_funding_locked);
+               nodes[0].node.handle_channel_ready(&nodes[2].node.get_our_node_id(), &as_channel_ready);
                get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
 
                // As `msgs::ChannelUpdate` was never handled for the participating node(s) of the second
@@ -1059,10 +1059,10 @@ mod test {
                connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
                confirm_transaction_at(&nodes[3], &tx, conf_height);
                connect_blocks(&nodes[3], CHAN_CONFIRM_DEPTH - 1);
-               let as_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[3].node.get_our_node_id());
-               nodes[1].node.handle_funding_locked(&nodes[3].node.get_our_node_id(), &get_event_msg!(nodes[3], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+               let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[3].node.get_our_node_id());
+               nodes[1].node.handle_channel_ready(&nodes[3].node.get_our_node_id(), &get_event_msg!(nodes[3], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
                get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
-               nodes[3].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
+               nodes[3].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &as_channel_ready);
                get_event_msg!(nodes[3], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
                // As `msgs::ChannelUpdate` was never handled for the participating node(s) of the third
index f7e42b6634147d405e2cfebe1278801940e0a771..2ac10762b04eaf23f602bc3d8dcc987438542cae 100644 (file)
@@ -549,7 +549,7 @@ mod tests {
                fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &AcceptChannel) {}
                fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &FundingCreated) {}
                fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &FundingSigned) {}
-               fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &FundingLocked) {}
+               fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &ChannelReady) {}
                fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &Shutdown) {}
                fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &ClosingSigned) {}
                fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &UpdateAddHTLC) {}
index a4bc04fed013e0599c52fbf5bdba44a7f4c8238b..58446a46e7d8f02c64941296f00d8c6e1d4f1818 100644 (file)
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-rapid-gossip-sync"
-version = "0.0.104"
+version = "0.0.106"
 authors = ["Arik Sosman <git@arik.io>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
index 4ab1028c7393e3781c45673a9e70852046f2c14b..123f3238ed0755facc3da65f83a410c74629c0ae 100644 (file)
@@ -197,6 +197,7 @@ mod tests {
                                println!("{}", error_string);
                                return;
                        }
+                       #[cfg(require_route_graph_test)]
                        panic!("{}", error_string);
                }
                let elapsed = start.elapsed();
index 503e6bdee0669551d1853932447a5db08fc92c17..e6b5733520a71f74a73526b227f1d704f9cf25f9 100644 (file)
@@ -731,7 +731,7 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
 mod tests {
        use bitcoin::BlockHeader;
        use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
-       use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
+       use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
        use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
        use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
        use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
@@ -798,16 +798,18 @@ mod tests {
                create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
                // Route two payments to be claimed at the same time.
-               let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
-               let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+               let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+               let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
                chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
                chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
                nodes[1].node.claim_funds(payment_preimage_1);
                check_added_monitors!(nodes[1], 1);
+               expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
                nodes[1].node.claim_funds(payment_preimage_2);
                check_added_monitors!(nodes[1], 1);
+               expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
                chanmon_cfgs[1].persister.set_update_ret(Ok(()));
 
@@ -877,8 +879,9 @@ mod tests {
                let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
                // First route a payment that we will claim on chain and give the recipient the preimage.
-               let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+               let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
                nodes[1].node.claim_funds(payment_preimage);
+               expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
                nodes[1].node.get_and_clear_pending_msg_events();
                check_added_monitors!(nodes[1], 1);
                let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
index 738fff3837ca045f5c8c9e3e93e588bb88974191..5128600163a42854f0be6c2bb5f27f79608050e6 100644 (file)
@@ -166,11 +166,11 @@ pub struct HTLCUpdate {
        pub(crate) payment_hash: PaymentHash,
        pub(crate) payment_preimage: Option<PaymentPreimage>,
        pub(crate) source: HTLCSource,
-       pub(crate) onchain_value_satoshis: Option<u64>,
+       pub(crate) htlc_value_satoshis: Option<u64>,
 }
 impl_writeable_tlv_based!(HTLCUpdate, {
        (0, payment_hash, required),
-       (1, onchain_value_satoshis, option),
+       (1, htlc_value_satoshis, option),
        (2, source, required),
        (4, payment_preimage, option),
 });
@@ -357,10 +357,10 @@ enum OnchainEvent {
        HTLCUpdate {
                source: HTLCSource,
                payment_hash: PaymentHash,
-               onchain_value_satoshis: Option<u64>,
+               htlc_value_satoshis: Option<u64>,
                /// None in the second case, above, ie when there is no relevant output in the commitment
                /// transaction which appeared on chain.
-               input_idx: Option<u32>,
+               commitment_tx_output_idx: Option<u32>,
        },
        MaturingOutput {
                descriptor: SpendableOutputDescriptor,
@@ -381,7 +381,7 @@ enum OnchainEvent {
        ///  * a revoked-state HTLC transaction was broadcasted, which was claimed by the revocation
        ///    signature.
        HTLCSpendConfirmation {
-               input_idx: u32,
+               commitment_tx_output_idx: u32,
                /// If the claim was made by either party with a preimage, this is filled in
                preimage: Option<PaymentPreimage>,
                /// If the claim was made by us on an inbound HTLC against a local commitment transaction,
@@ -423,9 +423,9 @@ impl MaybeReadable for OnchainEventEntry {
 impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
        (0, HTLCUpdate) => {
                (0, source, required),
-               (1, onchain_value_satoshis, option),
+               (1, htlc_value_satoshis, option),
                (2, payment_hash, required),
-               (3, input_idx, option),
+               (3, commitment_tx_output_idx, option),
        },
        (1, MaturingOutput) => {
                (0, descriptor, required),
@@ -434,7 +434,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
                (0, on_local_output_csv, option),
        },
        (5, HTLCSpendConfirmation) => {
-               (0, input_idx, required),
+               (0, commitment_tx_output_idx, required),
                (2, preimage, option),
                (4, on_to_local_output_csv, option),
        },
@@ -452,7 +452,7 @@ pub(crate) enum ChannelMonitorUpdateStep {
                commitment_txid: Txid,
                htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
                commitment_number: u64,
-               their_revocation_point: PublicKey,
+               their_per_commitment_point: PublicKey,
        },
        PaymentPreimage {
                payment_preimage: PaymentPreimage,
@@ -494,7 +494,7 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
        (1, LatestCounterpartyCommitmentTXInfo) => {
                (0, commitment_txid, required),
                (2, commitment_number, required),
-               (4, their_revocation_point, required),
+               (4, their_per_commitment_point, required),
                (6, htlc_outputs, vec_type),
        },
        (2, PaymentPreimage) => {
@@ -568,13 +568,13 @@ pub enum Balance {
 /// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
 #[derive(PartialEq)]
 struct IrrevocablyResolvedHTLC {
-       input_idx: u32,
+       commitment_tx_output_idx: u32,
        /// Only set if the HTLC claim was ours using a payment preimage
        payment_preimage: Option<PaymentPreimage>,
 }
 
 impl_writeable_tlv_based!(IrrevocablyResolvedHTLC, {
-       (0, input_idx, required),
+       (0, commitment_tx_output_idx, required),
        (2, payment_preimage, option),
 });
 
@@ -619,8 +619,8 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
        counterparty_commitment_params: CounterpartyCommitmentParameters,
        funding_redeemscript: Script,
        channel_value_satoshis: u64,
-       // first is the idx of the first of the two revocation points
-       their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
+       // first is the idx of the first of the two per-commitment points
+       their_cur_per_commitment_points: Option<(u64, PublicKey, Option<PublicKey>)>,
 
        on_holder_tx_csv: u16,
 
@@ -655,6 +655,10 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
        // deserialization
        current_holder_commitment_number: u64,
 
+       /// The set of payment hashes from inbound payments for which we know the preimage. Payment
+       /// preimages that are not included in any unrevoked local commitment transaction or unrevoked
+       /// remote commitment transactions are automatically removed when commitment transactions are
+       /// revoked.
        payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
 
        // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated
@@ -753,7 +757,7 @@ impl<Signer: Sign> PartialEq for ChannelMonitorImpl<Signer> {
                        self.counterparty_commitment_params != other.counterparty_commitment_params ||
                        self.funding_redeemscript != other.funding_redeemscript ||
                        self.channel_value_satoshis != other.channel_value_satoshis ||
-                       self.their_cur_revocation_points != other.their_cur_revocation_points ||
+                       self.their_cur_per_commitment_points != other.their_cur_per_commitment_points ||
                        self.on_holder_tx_csv != other.on_holder_tx_csv ||
                        self.commitment_secrets != other.commitment_secrets ||
                        self.counterparty_claimable_outpoints != other.counterparty_claimable_outpoints ||
@@ -828,7 +832,7 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
                self.funding_redeemscript.write(writer)?;
                self.channel_value_satoshis.write(writer)?;
 
-               match self.their_cur_revocation_points {
+               match self.their_cur_per_commitment_points {
                        Some((idx, pubkey, second_option)) => {
                                writer.write_all(&byte_utils::be48_to_array(idx))?;
                                writer.write_all(&pubkey.serialize())?;
@@ -1020,7 +1024,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                counterparty_commitment_params,
                                funding_redeemscript,
                                channel_value_satoshis,
-                               their_cur_revocation_points: None,
+                               their_cur_per_commitment_points: None,
 
                                on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
 
@@ -1070,11 +1074,11 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                txid: Txid,
                htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
                commitment_number: u64,
-               their_revocation_point: PublicKey,
+               their_per_commitment_point: PublicKey,
                logger: &L,
        ) where L::Target: Logger {
                self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
-                       txid, htlc_outputs, commitment_number, their_revocation_point, logger)
+                       txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
        }
 
        #[cfg(test)]
@@ -1085,7 +1089,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs).map_err(|_| ())
        }
 
-       #[cfg(test)]
+       /// This is used to provide payment preimage(s) out-of-band during startup without updating the
+       /// off-chain state with a new commitment transaction.
        pub(crate) fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
                &self,
                payment_hash: &PaymentHash,
@@ -1391,10 +1396,10 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                macro_rules! walk_htlcs {
                        ($holder_commitment: expr, $htlc_iter: expr) => {
                                for htlc in $htlc_iter {
-                                       if let Some(htlc_input_idx) = htlc.transaction_output_index {
+                                       if let Some(htlc_commitment_tx_output_idx) = htlc.transaction_output_index {
                                                if let Some(conf_thresh) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
                                                        if let OnchainEvent::MaturingOutput { descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) } = &event.event {
-                                                               if descriptor.outpoint.index as u32 == htlc_input_idx { Some(event.confirmation_threshold()) } else { None }
+                                                               if descriptor.outpoint.index as u32 == htlc_commitment_tx_output_idx { Some(event.confirmation_threshold()) } else { None }
                                                        } else { None }
                                                }) {
                                                        debug_assert!($holder_commitment);
@@ -1402,7 +1407,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                                claimable_amount_satoshis: htlc.amount_msat / 1000,
                                                                confirmation_height: conf_thresh,
                                                        });
-                                               } else if us.htlcs_resolved_on_chain.iter().any(|v| v.input_idx == htlc_input_idx) {
+                                               } else if us.htlcs_resolved_on_chain.iter().any(|v| v.commitment_tx_output_idx == htlc_commitment_tx_output_idx) {
                                                        // Funding transaction spends should be fully confirmed by the time any
                                                        // HTLC transactions are resolved, unless we're talking about a holder
                                                        // commitment tx, whose resolution is delayed until the CSV timeout is
@@ -1414,8 +1419,9 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                        // indicating we have spent this HTLC with a timeout, claiming it back
                                                        // and awaiting confirmations on it.
                                                        let htlc_update_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                               if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event {
-                                                                       if input_idx == htlc_input_idx { Some(event.confirmation_threshold()) } else { None }
+                                                               if let OnchainEvent::HTLCUpdate { commitment_tx_output_idx: Some(commitment_tx_output_idx), .. } = event.event {
+                                                                       if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
+                                                                               Some(event.confirmation_threshold()) } else { None }
                                                                } else { None }
                                                        });
                                                        if let Some(conf_thresh) = htlc_update_pending {
@@ -1436,8 +1442,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                        // preimage, we lost funds to our counterparty! We will then continue
                                                        // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
                                                        let htlc_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                               if let OnchainEvent::HTLCSpendConfirmation { input_idx, preimage, .. } = event.event {
-                                                                       if input_idx == htlc_input_idx {
+                                                               if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } = event.event {
+                                                                       if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
                                                                                Some((event.confirmation_threshold(), preimage.is_some()))
                                                                        } else { None }
                                                                } else { None }
@@ -1546,7 +1552,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                macro_rules! walk_htlcs {
                        ($holder_commitment: expr, $htlc_iter: expr) => {
                                for (htlc, source) in $htlc_iter {
-                                       if us.htlcs_resolved_on_chain.iter().any(|v| Some(v.input_idx) == htlc.transaction_output_index) {
+                                       if us.htlcs_resolved_on_chain.iter().any(|v| Some(v.commitment_tx_output_idx) == htlc.transaction_output_index) {
                                                // We should assert that funding_spend_confirmed is_some() here, but we
                                                // have some unit tests which violate HTLC transaction CSVs entirely and
                                                // would fail.
@@ -1557,17 +1563,17 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                // indicating we have spent this HTLC with a timeout, claiming it back
                                                // and awaiting confirmations on it.
                                                let htlc_update_confd = us.onchain_events_awaiting_threshold_conf.iter().any(|event| {
-                                                       if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event {
+                                                       if let OnchainEvent::HTLCUpdate { commitment_tx_output_idx: Some(commitment_tx_output_idx), .. } = event.event {
                                                                // If the HTLC was timed out, we wait for ANTI_REORG_DELAY blocks
                                                                // before considering it "no longer pending" - this matches when we
                                                                // provide the ChannelManager an HTLC failure event.
-                                                               Some(input_idx) == htlc.transaction_output_index &&
+                                                               Some(commitment_tx_output_idx) == htlc.transaction_output_index &&
                                                                        us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1
-                                                       } else if let OnchainEvent::HTLCSpendConfirmation { input_idx, .. } = event.event {
+                                                       } else if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, .. } = event.event {
                                                                // If the HTLC was fulfilled with a preimage, we consider the HTLC
                                                                // immediately non-pending, matching when we provide ChannelManager
                                                                // the preimage.
-                                                               Some(input_idx) == htlc.transaction_output_index
+                                                               Some(commitment_tx_output_idx) == htlc.transaction_output_index
                                                        } else { false }
                                                });
                                                if !htlc_update_confd {
@@ -1631,6 +1637,10 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 
                res
        }
+
+       pub(crate) fn get_stored_preimages(&self) -> HashMap<PaymentHash, PaymentPreimage> {
+               self.inner.lock().unwrap().payment_preimages.clone()
+       }
 }
 
 /// Compares a broadcasted commitment transaction's HTLCs with those in the latest state,
@@ -1688,8 +1698,8 @@ macro_rules! fail_unbroadcast_htlcs {
                                                                event: OnchainEvent::HTLCUpdate {
                                                                        source: (**source).clone(),
                                                                        payment_hash: htlc.payment_hash.clone(),
-                                                                       onchain_value_satoshis: Some(htlc.amount_msat / 1000),
-                                                                       input_idx: None,
+                                                                       htlc_value_satoshis: Some(htlc.amount_msat / 1000),
+                                                                       commitment_tx_output_idx: None,
                                                                },
                                                        };
                                                        log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction, waiting for confirmation (at height {})",
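
The renamed htlc_value_satoshis is derived from the HTLC's millisatoshi amount by integer division, so any sub-satoshi remainder is simply dropped:

fn msat_to_sat(amount_msat: u64) -> u64 {
    // 1 satoshi == 1_000 millisatoshis; integer division floors the result.
    amount_msat / 1000
}

fn main() {
    assert_eq!(msat_to_sat(2_500), 2); // 2.5 sat rounds down to 2 sat
}
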
@@ -1761,7 +1771,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                Ok(())
        }
 
-       pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger {
+       pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L) where L::Target: Logger {
                // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
                // so that a remote monitor doesn't learn anything unless there is a malicious close.
                // (only maybe, sadly we can't do the same for local info, as we need to be aware of
@@ -1776,22 +1786,22 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                self.counterparty_claimable_outpoints.insert(txid, htlc_outputs.clone());
                self.current_counterparty_commitment_number = commitment_number;
                //TODO: Merge this into the other per-counterparty-transaction output storage stuff
-               match self.their_cur_revocation_points {
+               match self.their_cur_per_commitment_points {
                        Some(old_points) => {
                                if old_points.0 == commitment_number + 1 {
-                                       self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
+                                       self.their_cur_per_commitment_points = Some((old_points.0, old_points.1, Some(their_per_commitment_point)));
                                } else if old_points.0 == commitment_number + 2 {
                                        if let Some(old_second_point) = old_points.2 {
-                                               self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
+                                               self.their_cur_per_commitment_points = Some((old_points.0 - 1, old_second_point, Some(their_per_commitment_point)));
                                        } else {
-                                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                                               self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
                                        }
                                } else {
-                                       self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                                       self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
                                }
                        },
                        None => {
-                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                               self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
                        }
                }
                let mut htlcs = Vec::with_capacity(htlc_outputs.len());
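
their_cur_per_commitment_points keeps the per-commitment points for up to two adjacent counterparty commitment numbers, since non-atomic revocation means two counterparty commitment transactions can briefly be valid at once. A standalone model of that bookkeeping and of the matching lookup used later when building claims (Point stands in for the real secp256k1 public key; commitment numbers here count down as new states are exchanged):

// Simplified model only; not the real ChannelMonitorImpl fields.
type Point = u8;

/// (n, point for commitment n, optional point for commitment n - 1)
type TrackedPoints = Option<(u64, Point, Option<Point>)>;

fn provide_point(tracked: TrackedPoints, number: u64, point: Point) -> TrackedPoints {
    match tracked {
        // The new state directly follows the first one we track; remember both.
        Some((n, p, _)) if n == number + 1 => Some((n, p, Some(point))),
        // Slide the two-state window forward by one.
        Some((n, _, Some(second))) if n == number + 2 => Some((n - 1, second, Some(point))),
        // Anything else (including a gap) resets tracking to just the new state.
        _ => Some((number, point, None)),
    }
}

// Mirrors the lookup in get_counterparty_htlc_output_claim_reqs: a point is only
// known for the two adjacent commitment numbers currently tracked.
fn point_for(tracked: &TrackedPoints, number: u64) -> Option<Point> {
    match tracked {
        Some((n, p, _)) if *n == number => Some(*p),
        Some((n, _, Some(second))) if *n == number + 1 => Some(*second),
        _ => None,
    }
}

fn main() {
    let mut tracked = None;
    tracked = provide_point(tracked, 1000, 0xaa);
    tracked = provide_point(tracked, 999, 0xbb);
    assert_eq!(point_for(&tracked, 1000), Some(0xaa));
    assert_eq!(point_for(&tracked, 999), Some(0xbb));
    tracked = provide_point(tracked, 998, 0xcc);
    assert_eq!(point_for(&tracked, 1000), None); // too old, no longer tracked
    assert_eq!(point_for(&tracked, 998), Some(0xcc));
}
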
@@ -1929,9 +1939,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                ret = Err(());
                                        }
                                }
-                               ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_revocation_point } => {
+                               ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_per_commitment_point } => {
                                        log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
-                                       self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger)
+                                       self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point, logger)
                                },
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
                                        log_trace!(logger, "Updating ChannelMonitor with payment preimage");
@@ -2120,18 +2130,18 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
        fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<PackageTemplate> {
                let mut claimable_outpoints = Vec::new();
                if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
-                       if let Some(revocation_points) = self.their_cur_revocation_points {
-                               let revocation_point_option =
+                       if let Some(per_commitment_points) = self.their_cur_per_commitment_points {
+                               let per_commitment_point_option =
                                        // If the counterparty commitment tx is the latest valid state, use their latest
                                        // per-commitment point
-                                       if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
-                                       else if let Some(point) = revocation_points.2.as_ref() {
+                                       if per_commitment_points.0 == commitment_number { Some(&per_commitment_points.1) }
+                                       else if let Some(point) = per_commitment_points.2.as_ref() {
                                                // If counterparty commitment tx is the state previous to the latest valid state, use
                                                // their previous per-commitment point (non-atomicity of revocation means it's valid for
                                                // them to temporarily have two valid commitment txns from our viewpoint)
-                                               if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
+                                               if per_commitment_points.0 == commitment_number + 1 { Some(point) } else { None }
                                        } else { None };
-                               if let Some(revocation_point) = revocation_point_option {
+                               if let Some(per_commitment_point) = per_commitment_point_option {
                                        for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
                                                if let Some(transaction_output_index) = htlc.transaction_output_index {
                                                        if let Some(transaction) = tx {
@@ -2142,7 +2152,19 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        }
                                                        let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
                                                        if preimage.is_some() || !htlc.offered {
-                                                               let counterparty_htlc_outp = if htlc.offered { PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(*revocation_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, preimage.unwrap(), htlc.clone())) } else { PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(*revocation_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, htlc.clone())) };
+                                                               let counterparty_htlc_outp = if htlc.offered {
+                                                                       PackageSolvingData::CounterpartyOfferedHTLCOutput(
+                                                                               CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
+                                                                                       self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+                                                                                       self.counterparty_commitment_params.counterparty_htlc_base_key,
+                                                                                       preimage.unwrap(), htlc.clone()))
+                                                               } else {
+                                                                       PackageSolvingData::CounterpartyReceivedHTLCOutput(
+                                                                               CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
+                                                                                       self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+                                                                                       self.counterparty_commitment_params.counterparty_htlc_base_key,
+                                                                                       htlc.clone()))
+                                                               };
                                                               let aggregation = htlc.offered;
                                                               let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry, aggregation, 0);
                                                                claimable_outpoints.push(counterparty_package);
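
When a counterparty commitment transaction hits the chain, whether each HTLC output is worth claiming depends on its direction: an HTLC offered to us is only claimable if we know the preimage, while an HTLC we sent is reclaimed on its CLTV timeout. A rough standalone model of that decision, with simplified types in place of the real package-building structures:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct PaymentHash([u8; 32]);
#[derive(Clone, Copy, Debug, PartialEq)]
struct PaymentPreimage([u8; 32]);

// In the counterparty's commitment, `offered == true` is an HTLC offered to us.
struct Htlc { offered: bool, payment_hash: PaymentHash, cltv_expiry: u32 }

#[derive(Debug, PartialEq)]
enum ClaimKind {
    // We know the preimage, so sweep the counterparty's offered HTLC output.
    WithPreimage(PaymentPreimage),
    // Our own outgoing HTLC; sweep it back once the CLTV expiry passes.
    AfterTimeout { cltv_expiry: u32 },
}

fn claim_for(htlc: &Htlc, preimages: &HashMap<PaymentHash, PaymentPreimage>) -> Option<ClaimKind> {
    let preimage = if htlc.offered { preimages.get(&htlc.payment_hash).copied() } else { None };
    if let Some(p) = preimage {
        Some(ClaimKind::WithPreimage(p))
    } else if !htlc.offered {
        Some(ClaimKind::AfterTimeout { cltv_expiry: htlc.cltv_expiry })
    } else {
        // Offered to us but we never learned the preimage: nothing to claim.
        None
    }
}

fn main() {
    let mut preimages = HashMap::new();
    let hash = PaymentHash([1; 32]);
    preimages.insert(hash, PaymentPreimage([2; 32]));
    let incoming = Htlc { offered: true, payment_hash: hash, cltv_expiry: 500_000 };
    let outgoing = Htlc { offered: false, payment_hash: PaymentHash([9; 32]), cltv_expiry: 500_100 };
    assert_eq!(claim_for(&incoming, &preimages), Some(ClaimKind::WithPreimage(PaymentPreimage([2; 32]))));
    assert_eq!(claim_for(&outgoing, &preimages), Some(ClaimKind::AfterTimeout { cltv_expiry: 500_100 }));
}
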
@@ -2522,7 +2544,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                // Produce actionable events from on-chain events having reached their threshold.
                for entry in onchain_events_reaching_threshold_conf.drain(..) {
                        match entry.event {
-                               OnchainEvent::HTLCUpdate { ref source, payment_hash, onchain_value_satoshis, input_idx } => {
+                               OnchainEvent::HTLCUpdate { ref source, payment_hash, htlc_value_satoshis, commitment_tx_output_idx } => {
                                        // Check for duplicate HTLC resolutions.
                                        #[cfg(debug_assertions)]
                                        {
@@ -2544,10 +2566,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                payment_hash,
                                                payment_preimage: None,
                                                source: source.clone(),
-                                               onchain_value_satoshis,
+                                               htlc_value_satoshis,
                                        }));
-                                       if let Some(idx) = input_idx {
-                                               self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { input_idx: idx, payment_preimage: None });
+                                       if let Some(idx) = commitment_tx_output_idx {
+                                               self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx: idx, payment_preimage: None });
                                        }
                                },
                                OnchainEvent::MaturingOutput { descriptor } => {
@@ -2556,8 +2578,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                outputs: vec![descriptor]
                                        });
                                },
-                               OnchainEvent::HTLCSpendConfirmation { input_idx, preimage, .. } => {
-                                       self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { input_idx, payment_preimage: preimage });
+                               OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
+                                       self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx, payment_preimage: preimage });
                                },
                                OnchainEvent::FundingSpendConfirmation { .. } => {
                                        self.funding_spend_confirmed = Some(entry.txid);
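
Once an on-chain event entry reaches its confirmation threshold it is turned into an actionable result, as in the match above. A simplified standalone model of that dispatch (the types are illustrative stand-ins, not the real monitor events):

type Preimage = [u8; 32];

enum OnchainEvent {
    HtlcUpdate { commitment_tx_output_idx: Option<u32> },
    HtlcSpendConfirmation { commitment_tx_output_idx: u32, preimage: Option<Preimage> },
    MaturingOutput { descriptor: String },
    FundingSpendConfirmation,
}

#[derive(Debug)]
enum Action {
    FailHtlcBackwards,
    SpendableOutputs(String),
    RecordFundingSpend,
}

struct ResolvedHtlc { commitment_tx_output_idx: u32, payment_preimage: Option<Preimage> }

fn handle_confirmed(event: OnchainEvent, resolved: &mut Vec<ResolvedHtlc>) -> Option<Action> {
    match event {
        OnchainEvent::HtlcUpdate { commitment_tx_output_idx } => {
            // The HTLC timed out on chain: report the failure upstream, and if we
            // know which commitment output it sat in, record it as resolved.
            if let Some(idx) = commitment_tx_output_idx {
                resolved.push(ResolvedHtlc { commitment_tx_output_idx: idx, payment_preimage: None });
            }
            Some(Action::FailHtlcBackwards)
        },
        OnchainEvent::HtlcSpendConfirmation { commitment_tx_output_idx, preimage } => {
            resolved.push(ResolvedHtlc { commitment_tx_output_idx, payment_preimage: preimage });
            None
        },
        OnchainEvent::MaturingOutput { descriptor } => Some(Action::SpendableOutputs(descriptor)),
        OnchainEvent::FundingSpendConfirmation => Some(Action::RecordFundingSpend),
    }
}

fn main() {
    let mut resolved = Vec::new();
    let action = handle_confirmed(
        OnchainEvent::HtlcUpdate { commitment_tx_output_idx: Some(2) }, &mut resolved);
    assert!(matches!(action, Some(Action::FailHtlcBackwards)));
    assert_eq!(resolved.len(), 1);
}
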
@@ -2826,7 +2848,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                                        self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                                                txid: tx.txid(), height,
                                                                                event: OnchainEvent::HTLCSpendConfirmation {
-                                                                                       input_idx: input.previous_output.vout,
+                                                                                       commitment_tx_output_idx: input.previous_output.vout,
                                                                                        preimage: if accepted_preimage_claim || offered_preimage_claim {
                                                                                                Some(payment_preimage) } else { None },
                                                                                        // If this is a payment to us (!outbound_htlc, above),
@@ -2877,7 +2899,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        txid: tx.txid(),
                                                        height,
                                                        event: OnchainEvent::HTLCSpendConfirmation {
-                                                               input_idx: input.previous_output.vout,
+                                                               commitment_tx_output_idx: input.previous_output.vout,
                                                                preimage: Some(payment_preimage),
                                                                on_to_local_output_csv: None,
                                                        },
@@ -2886,7 +2908,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        source,
                                                        payment_preimage: Some(payment_preimage),
                                                        payment_hash,
-                                                       onchain_value_satoshis: Some(amount_msat / 1000),
+                                                       htlc_value_satoshis: Some(amount_msat / 1000),
                                                }));
                                        }
                                } else if offered_preimage_claim {
@@ -2898,7 +2920,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        txid: tx.txid(),
                                                        height,
                                                        event: OnchainEvent::HTLCSpendConfirmation {
-                                                               input_idx: input.previous_output.vout,
+                                                               commitment_tx_output_idx: input.previous_output.vout,
                                                                preimage: Some(payment_preimage),
                                                                on_to_local_output_csv: None,
                                                        },
@@ -2907,7 +2929,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        source,
                                                        payment_preimage: Some(payment_preimage),
                                                        payment_hash,
-                                                       onchain_value_satoshis: Some(amount_msat / 1000),
+                                                       htlc_value_satoshis: Some(amount_msat / 1000),
                                                }));
                                        }
                                } else {
@@ -2925,8 +2947,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                height,
                                                event: OnchainEvent::HTLCUpdate {
                                                        source, payment_hash,
-                                                       onchain_value_satoshis: Some(amount_msat / 1000),
-                                                       input_idx: Some(input.previous_output.vout),
+                                                       htlc_value_satoshis: Some(amount_msat / 1000),
+                                                       commitment_tx_output_idx: Some(input.previous_output.vout),
                                                },
                                        };
                                        log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
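
In each of these arms the resolved commitment_tx_output_idx is read straight off the spending input: previous_output.vout is the position, within the confirmed commitment transaction, of the output that input consumes. A tiny illustration with simplified stand-ins for the rust-bitcoin OutPoint/TxIn types:

// Simplified stand-ins for bitcoin::OutPoint / bitcoin::TxIn.
struct OutPoint { txid: [u8; 32], vout: u32 }
struct TxIn { previous_output: OutPoint }

fn main() {
    // A claim or timeout transaction input spending output #3 of a commitment tx.
    let input = TxIn { previous_output: OutPoint { txid: [0; 32], vout: 3 } };
    // That vout is exactly the commitment_tx_output_idx recorded above.
    let commitment_tx_output_idx = input.previous_output.vout;
    assert_eq!(commitment_tx_output_idx, 3);
}
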
@@ -3094,7 +3116,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                let funding_redeemscript = Readable::read(reader)?;
                let channel_value_satoshis = Readable::read(reader)?;
 
-               let their_cur_revocation_points = {
+               let their_cur_per_commitment_points = {
                        let first_idx = <U48 as Readable>::read(reader)?.0;
                        if first_idx == 0 {
                                None
@@ -3283,7 +3305,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                                counterparty_commitment_params,
                                funding_redeemscript,
                                channel_value_satoshis,
-                               their_cur_revocation_points,
+                               their_cur_per_commitment_points,
 
                                on_holder_tx_csv,
 
index 91688cc4fa3c3eed26b46b991c00dee94b2106e6..770fe8139413446bcc498086d00572db696f7dbc 100644 (file)
@@ -89,7 +89,7 @@ fn test_monitor_and_persister_update_fail() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
 
        // Route an HTLC from node 0 to node 1 (but don't settle)
-       let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
+       let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
 
        // Make a copy of the ChainMonitor so we can capture the error it returns on a
        // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
@@ -103,7 +103,7 @@ fn test_monitor_and_persister_update_fail() {
                // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
                // that we are at height 200 so that it doesn't think we're violating the time lock
                // requirements of transactions broadcasted at that point.
-               blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])),
+               blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
        };
        let chain_mon = {
                let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
@@ -123,8 +123,10 @@ fn test_monitor_and_persister_update_fail() {
        persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
        // Try to update ChannelMonitor
-       assert!(nodes[1].node.claim_funds(preimage));
+       nodes[1].node.claim_funds(preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
        check_added_monitors!(nodes[1], 1);
+
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
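
These test changes suggest claim_funds no longer returns a success flag; instead the tests assert on a claim event carrying the payment hash and amount via expect_payment_claimed!. A rough standalone model of what such a helper checks (the event type and helper here are illustrative, not LDK's actual definitions):

// Illustrative stand-ins; not the real LDK Event or test macro.
#[derive(Debug, Clone, PartialEq)]
enum Event {
    PaymentClaimed { payment_hash: [u8; 32], amount_msat: u64 },
}

struct TestNode { pending_events: Vec<Event> }

impl TestNode {
    fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
        std::mem::take(&mut self.pending_events)
    }
}

// Rough equivalent of expect_payment_claimed!(node, hash, amount): pop the
// pending events and require exactly one claim event with the expected values.
fn expect_payment_claimed(node: &mut TestNode, expected_hash: [u8; 32], expected_amount_msat: u64) {
    let events = node.get_and_clear_pending_events();
    assert_eq!(events.len(), 1);
    match &events[0] {
        Event::PaymentClaimed { payment_hash, amount_msat } => {
            assert_eq!(*payment_hash, expected_hash);
            assert_eq!(*amount_msat, expected_amount_msat);
        }
    }
}

fn main() {
    let mut node = TestNode {
        pending_events: vec![Event::PaymentClaimed { payment_hash: [7; 32], amount_msat: 9_000_000 }],
    };
    expect_payment_claimed(&mut node, [7; 32], 9_000_000);
}
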
@@ -189,9 +191,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        let events_3 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_3.len(), 1);
        match events_3[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(payment_hash_1, *payment_hash);
-                       assert_eq!(amt, 1000000);
+                       assert_eq!(amount_msat, 1_000_000);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -267,7 +269,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -283,8 +285,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 
        // Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
        // but nodes[0] won't respond since it is frozen.
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
+
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
        let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
@@ -555,9 +559,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(payment_hash_2, *payment_hash);
-                       assert_eq!(amt, 1000000);
+                       assert_eq!(amount_msat, 1_000_000);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -672,9 +676,9 @@ fn test_monitor_update_fail_cs() {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
-               Event::PaymentReceived { payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(payment_hash, our_payment_hash);
-                       assert_eq!(amt, 1000000);
+                       assert_eq!(amount_msat, 1_000_000);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -827,7 +831,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
-       assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
+       nodes[2].node.fail_htlc_backwards(&payment_hash_1);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
 
@@ -1088,13 +1092,15 @@ fn test_monitor_update_fail_reestablish() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
 
-       let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-       assert!(nodes[2].node.claim_funds(payment_preimage));
+       nodes[2].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
        let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
@@ -1292,13 +1298,14 @@ fn claim_while_disconnected_monitor_update_fail() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
        // Forward a payment for B to claim
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
@@ -1578,10 +1585,11 @@ fn test_monitor_update_fail_claim() {
        // Rebalance a bit so that we can send backwards from 3 to 2.
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
@@ -1642,9 +1650,9 @@ fn test_monitor_update_fail_claim() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(payment_hash_2, *payment_hash);
-                       assert_eq!(1_000_000, amt);
+                       assert_eq!(1_000_000, amount_msat);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -1656,9 +1664,9 @@ fn test_monitor_update_fail_claim() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(payment_hash_3, *payment_hash);
-                       assert_eq!(1_000_000, amt);
+                       assert_eq!(1_000_000, amount_msat);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -1688,7 +1696,7 @@ fn test_monitor_update_on_pending_forwards() {
        send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
 
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
-       assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
+       nodes[2].node.fail_htlc_backwards(&payment_hash_1);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
 
@@ -1754,7 +1762,7 @@ fn monitor_update_claim_fail_no_response() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
        // Forward a payment for B to claim
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -1770,8 +1778,10 @@ fn monitor_update_claim_fail_no_response() {
        let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
        chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
+
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
@@ -1838,7 +1848,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        if confirm_a_first {
                confirm_transaction(&nodes[0], &funding_tx);
-               nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+               nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        } else {
@@ -1847,7 +1857,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        }
 
-       // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
+       // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -1861,7 +1871,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        }
        if !confirm_a_first && !restore_b_before_lock {
                confirm_transaction(&nodes[0], &funding_tx);
-               nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+               nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        }
@@ -1873,13 +1883,13 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
                if !restore_b_before_lock {
-                       let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
-                       (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+                       let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+                       (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
                } else {
-                       nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
+                       nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
                        confirm_transaction(&nodes[0], &funding_tx);
-                       let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
-                       (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
+                       let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
+                       (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
                }
        } else {
                if restore_b_before_conf {
@@ -1887,8 +1897,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
                        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
                        confirm_transaction(&nodes[1], &funding_tx);
                }
-               let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
-               (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+               let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+               (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
        };
        for node in nodes.iter() {
                assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
@@ -2076,13 +2086,15 @@ fn test_fail_htlc_on_broadcast_after_claim() {
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
 
-       let payment_preimage = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
 
        let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
        assert_eq!(bs_txn.len(), 1);
 
        nodes[2].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 2000);
+
        let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -2235,7 +2247,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        //
        // Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
        // will not be freed from the holding cell.
-       let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000);
+       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
 
        nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@ -2246,8 +2258,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        check_added_monitors!(nodes[0], 0);
 
        chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       assert!(nodes[0].node.claim_funds(payment_preimage_0));
+       nodes[0].node.claim_funds(payment_preimage_0);
        check_added_monitors!(nodes[0], 1);
+       expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
@@ -2455,13 +2468,15 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                payment_preimage,
        };
        if second_fails {
-               assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
+               nodes[2].node.fail_htlc_backwards(&payment_hash);
                expect_pending_htlcs_forwardable!(nodes[2]);
                check_added_monitors!(nodes[2], 1);
                get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        } else {
-               assert!(nodes[2].node.claim_funds(payment_preimage));
+               nodes[2].node.claim_funds(payment_preimage);
                check_added_monitors!(nodes[2], 1);
+               expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
                let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
                assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
                // Check that the message we're about to deliver matches the one generated:
@@ -2630,20 +2645,22 @@ fn double_temp_error() {
 
        let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
-       let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
        // `claim_funds` results in a ChannelMonitorUpdate.
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 
        chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
        // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
        // which had some asserts that prevented it from being called twice.
-       assert!(nodes[1].node.claim_funds(payment_preimage_2));
+       nodes[1].node.claim_funds(payment_preimage_2);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
        chanmon_cfgs[1].persister.set_update_ret(Ok(()));
 
        let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
index 6c610402350211c1d4cf24c85f97ae11edb2aac1..ca02f0a9ac9e302ae10047025097ff24c1fcc7cd 100644 (file)
@@ -243,7 +243,7 @@ enum HTLCUpdateAwaitingACK {
 
 /// There are a few "states" and then a number of flags which can be applied:
 /// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
-/// TheirFundingLocked and OurFundingLocked then get set on FundingSent, and when both are set we
+/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
 /// move on to ChannelFunded.
 /// Note that PeerDisconnected can be set on both ChannelFunded and FundingSent.
 /// ChannelFunded can then get all remaining flags set on it, until we finish shutdown, then we
@@ -258,15 +258,15 @@ enum ChannelState {
        /// upon receipt of funding_created, so simply skip this state.
        FundingCreated = 4,
        /// Set when we have received/sent funding_created and funding_signed and are thus now waiting
-       /// on the funding transaction to confirm. The FundingLocked flags are set to indicate when we
+       /// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
        /// and our counterparty consider the funding transaction confirmed.
        FundingSent = 8,
-       /// Flag which can be set on FundingSent to indicate they sent us a funding_locked message.
-       /// Once both TheirFundingLocked and OurFundingLocked are set, state moves on to ChannelFunded.
-       TheirFundingLocked = 1 << 4,
-       /// Flag which can be set on FundingSent to indicate we sent them a funding_locked message.
-       /// Once both TheirFundingLocked and OurFundingLocked are set, state moves on to ChannelFunded.
-       OurFundingLocked = 1 << 5,
+       /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
+       /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
+       TheirChannelReady = 1 << 4,
+       /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
+       /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
+       OurChannelReady = 1 << 5,
        ChannelFunded = 64,
        /// Flag which is set on ChannelFunded and FundingSent indicating remote side is considered
        /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
@@ -429,13 +429,13 @@ pub(super) struct MonitorRestoreUpdates {
        pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        pub finalized_claimed_htlcs: Vec<HTLCSource>,
        pub funding_broadcastable: Option<Transaction>,
-       pub funding_locked: Option<msgs::FundingLocked>,
+       pub channel_ready: Option<msgs::ChannelReady>,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 }
 
 /// The return value of `channel_reestablish`
 pub(super) struct ReestablishResponses {
-       pub funding_locked: Option<msgs::FundingLocked>,
+       pub channel_ready: Option<msgs::ChannelReady>,
        pub raa: Option<msgs::RevokeAndACK>,
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub order: RAACommitmentOrder,
@@ -543,7 +543,7 @@ pub(super) struct Channel<Signer: Sign> {
        /// send it first.
        resend_order: RAACommitmentOrder,
 
-       monitor_pending_funding_locked: bool,
+       monitor_pending_channel_ready: bool,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,
        monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
@@ -688,12 +688,12 @@ pub(super) struct Channel<Signer: Sign> {
 
        /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
        /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
-       /// funding_locked *before* sending the channel_reestablish (which is clearly a violation of
-       /// the BOLT specs). We copy c-lightning's workaround here and simply store the funding_locked
+       /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
+       /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
        /// message until we receive a channel_reestablish.
        ///
        /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
-       pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+       pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
 
        #[cfg(any(test, fuzzing))]
        // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
@@ -710,6 +710,11 @@ pub(super) struct Channel<Signer: Sign> {
        // Our counterparty can offer us SCID aliases which they will map to this channel when routing
        // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
        // the channel's funding UTXO.
+       //
+       // We also use this when sending our peer a channel_update that isn't to be broadcasted
+       // publicly - allowing them to re-use their SCID -> channel map when associating such a
+       // channel_update with its channel.
+       //
        // We only bother storing the most recent SCID alias at any time, though our counterparty has
        // to store all of them.
        latest_inbound_scid_alias: Option<u64>,
@@ -955,7 +960,7 @@ impl<Signer: Sign> Channel<Signer> {
 
                        resend_order: RAACommitmentOrder::CommitmentFirst,
 
-                       monitor_pending_funding_locked: false,
+                       monitor_pending_channel_ready: false,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
                        monitor_pending_forwards: Vec::new(),
@@ -1272,7 +1277,7 @@ impl<Signer: Sign> Channel<Signer> {
 
                        resend_order: RAACommitmentOrder::CommitmentFirst,
 
-                       monitor_pending_funding_locked: false,
+                       monitor_pending_channel_ready: false,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
                        monitor_pending_forwards: Vec::new(),
@@ -1307,7 +1312,7 @@ impl<Signer: Sign> Channel<Signer> {
                        counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
                        holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat },
                        counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
-                       minimum_depth: Some(config.own_channel_config.minimum_depth),
+                       minimum_depth: Some(cmp::max(config.own_channel_config.minimum_depth, 1)),
 
                        counterparty_forwarding_info: None,
 
@@ -1703,6 +1708,28 @@ impl<Signer: Sign> Channel<Signer> {
                make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
        }
 
+       /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+       /// entirely.
+       ///
+       /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+       /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+       ///
+       /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+       /// disconnected).
+       pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+               (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+       where L::Target: Logger {
+               // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+               // (see equivalent if condition there).
+               assert!(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) != 0);
+               let mon_update_id = self.latest_monitor_update_id; // Forget the ChannelMonitor update
+               let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+               self.latest_monitor_update_id = mon_update_id;
+               if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+                       assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
+               }
+       }
+
        fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
                // Either ChannelFunded got set (which means it won't be unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
@@ -1765,6 +1792,10 @@ impl<Signer: Sign> Channel<Signer> {
                };
 
                if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
+                       // Note that this condition is the same as the assertion in
+                       // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+                       // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+                       // do not get into this branch.
                        for pending_update in self.holding_cell_htlc_updates.iter() {
                                match pending_update {
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
@@ -1987,12 +2018,6 @@ impl<Signer: Sign> Channel<Signer> {
                if msg.minimum_depth > peer_limits.max_minimum_depth {
                        return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
                }
-               if msg.minimum_depth == 0 {
-                       // Note that if this changes we should update the serialization minimum version to
-                       // indicate to older clients that they don't understand some features of the current
-                       // channel.
-                       return Err(ChannelError::Close("Minimum confirmation depth must be at least 1".to_owned()));
-               }
 
                if let Some(ty) = &msg.channel_type {
                        if *ty != self.channel_type {
@@ -2029,7 +2054,12 @@ impl<Signer: Sign> Channel<Signer> {
                self.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
                self.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
                self.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
-               self.minimum_depth = Some(msg.minimum_depth);
+
+               if peer_limits.trust_own_funding_0conf {
+                       self.minimum_depth = Some(msg.minimum_depth);
+               } else {
+                       self.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+               }
 
                let counterparty_pubkeys = ChannelPublicKeys {
                        funding_pubkey: msg.funding_pubkey,
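
The clamp above means a peer-supplied minimum_depth of zero (a 0-conf channel) is only honored when the local limits opt into trusting our own funding transaction; otherwise at least one confirmation is required. As a standalone sketch:

struct PeerLimits { trust_own_funding_0conf: bool }

fn effective_minimum_depth(peer_limits: &PeerLimits, msg_minimum_depth: u32) -> u32 {
    if peer_limits.trust_own_funding_0conf {
        msg_minimum_depth
    } else {
        // Never accept a 0-conf channel we did not explicitly opt into.
        msg_minimum_depth.max(1)
    }
}

fn main() {
    assert_eq!(effective_minimum_depth(&PeerLimits { trust_own_funding_0conf: false }, 0), 1);
    assert_eq!(effective_minimum_depth(&PeerLimits { trust_own_funding_0conf: true }, 0), 0);
    assert_eq!(effective_minimum_depth(&PeerLimits { trust_own_funding_0conf: false }, 3), 3);
}
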
@@ -2089,7 +2119,7 @@ impl<Signer: Sign> Channel<Signer> {
                &self.get_counterparty_pubkeys().funding_pubkey
        }
 
-       pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
+       pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
                if self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
                }
@@ -2164,12 +2194,12 @@ impl<Signer: Sign> Channel<Signer> {
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
-               }, channel_monitor))
+               }, channel_monitor, self.check_get_channel_ready(0)))
        }
 
        /// Handles a funding_signed message from the remote end.
        /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction), ChannelError> where L::Target: Logger {
+       pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
                if !self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
                }
@@ -2238,16 +2268,16 @@ impl<Signer: Sign> Channel<Signer> {
 
                log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
 
-               Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
+               Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
        }
 
-       /// Handles a funding_locked message from our peer. If we've already sent our funding_locked
+       /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
        /// and the channel is now usable (and public), this may generate an announcement_signatures to
        /// reply with.
-       pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
+       pub fn channel_ready<L: Deref>(&mut self, msg: &msgs::ChannelReady, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        self.workaround_lnd_bug_4006 = Some(msg.clone());
-                       return Err(ChannelError::Ignore("Peer sent funding_locked when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
+                       return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
                }
 
                if let Some(scid_alias) = msg.short_channel_id_alias {
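
The widened return types above (funding_created and funding_signed now also yield an Option<msgs::ChannelReady>) exist so that a 0-conf channel can emit its channel_ready straight out of the funding handlers, before any confirmation. A caller-side sketch under simplified, hypothetical types:

    struct FundingSigned;
    struct ChannelReady;
    // Hypothetical caller: forward the channel_ready immediately if one was produced.
    fn forward_if_ready(result: (FundingSigned, Option<ChannelReady>), mut send: impl FnMut(ChannelReady)) {
        let (_funding_signed, maybe_ready) = result;
        if let Some(ready) = maybe_ready {
            // 0-conf path: announce readiness without waiting for the funding tx to confirm.
            send(ready);
        }
    }
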
@@ -2262,16 +2292,16 @@ impl<Signer: Sign> Channel<Signer> {
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
 
                if non_shutdown_state == ChannelState::FundingSent as u32 {
-                       self.channel_state |= ChannelState::TheirFundingLocked as u32;
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+                       self.channel_state |= ChannelState::TheirChannelReady as u32;
+               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
                        self.update_time_counter += 1;
                } else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 ||
-                       // If we reconnected before sending our funding locked they may still resend theirs:
-                       (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) ==
-                                             (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32))
+                       // If we reconnected before sending our `channel_ready` they may still resend theirs:
+                       (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
+                                             (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
                {
-                       // They probably disconnected/reconnected and re-sent the funding_locked, which is
+                       // They probably disconnected/reconnected and re-sent the channel_ready, which is
                        // required, or they're sending a fresh SCID alias.
                        let expected_point =
                                if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
@@ -2279,24 +2309,24 @@ impl<Signer: Sign> Channel<Signer> {
                                        // the current one.
                                        self.counterparty_cur_commitment_point
                                } else {
-                                       // If they have sent updated points, funding_locked is always supposed to match
+                                       // If they have sent updated points, channel_ready is always supposed to match
                                        // their "first" point, which we re-derive here.
                                        Some(PublicKey::from_secret_key(&self.secp_ctx, &SecretKey::from_slice(
                                                        &self.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
                                                ).expect("We already advanced, so previous secret keys should have been validated already")))
                                };
                        if expected_point != Some(msg.next_per_commitment_point) {
-                               return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point".to_owned()));
+                               return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
                        }
                        return Ok(None);
                } else {
-                       return Err(ChannelError::Close("Peer sent a funding_locked at a strange time".to_owned()));
+                       return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
                }
 
                self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
                self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
 
-               log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id()));
+               log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
 
                Ok(self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger))
        }
@@ -3540,12 +3570,13 @@ impl<Signer: Sign> Channel<Signer> {
        /// monitor update failure must *not* have been sent to the remote end, and must instead
        /// have been dropped. They will be regenerated when monitor_updating_restored is called.
        pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool,
-               mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+               resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
        ) {
                self.monitor_pending_revoke_and_ack |= resend_raa;
                self.monitor_pending_commitment_signed |= resend_commitment;
+               self.monitor_pending_channel_ready |= resend_channel_ready;
                self.monitor_pending_forwards.append(&mut pending_forwards);
                self.monitor_pending_failures.append(&mut pending_fails);
                self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
@@ -3559,20 +3590,31 @@ impl<Signer: Sign> Channel<Signer> {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
 
-               let funding_broadcastable = if self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.is_outbound() {
-                       self.funding_transaction.take()
-               } else { None };
+               // If we're past (or at) the FundingSent stage on an outbound channel, try to
+               // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+               // first received the funding_signed.
+               let mut funding_broadcastable =
+                       if self.is_outbound() && self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+                               self.funding_transaction.take()
+                       } else { None };
+               // That said, if the funding transaction is already confirmed (i.e. we're active with a
+               // minimum_depth over 0), don't bother re-broadcasting the confirmed funding tx.
+               if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelFunded as u32 && self.minimum_depth != Some(0) {
+                       funding_broadcastable = None;
+               }
 
                // We will never broadcast the funding transaction when we're in MonitorUpdateFailed (and
                // we assume the user never directly broadcasts the funding transaction and waits for us to
-               // do it). Thus, we can only ever hit monitor_pending_funding_locked when we're an inbound
-               // channel which failed to persist the monitor on funding_created, and we got the funding
-               // transaction confirmed before the monitor was persisted.
-               let funding_locked = if self.monitor_pending_funding_locked {
-                       assert!(!self.is_outbound(), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
-                       self.monitor_pending_funding_locked = false;
+               // do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
+               // * an inbound channel that failed to persist the monitor on funding_created and we got
+               //   the funding transaction confirmed before the monitor was persisted, or
+               // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
+               let channel_ready = if self.monitor_pending_channel_ready {
+                       assert!(!self.is_outbound() || self.minimum_depth == Some(0),
+                               "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+                       self.monitor_pending_channel_ready = false;
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
-                       Some(msgs::FundingLocked {
+                       Some(msgs::ChannelReady {
                                channel_id: self.channel_id(),
                                next_per_commitment_point,
                                short_channel_id_alias: Some(self.outbound_scid_alias),
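
The broadcast rule above, restated as a hypothetical standalone helper (a sketch, not LDK's code): on monitor restore, the funding transaction of an outbound channel that has reached FundingSent is (re-)broadcast, unless the channel is already fully funded with a non-zero minimum_depth, in which case the funding tx is confirmed and rebroadcasting is pointless.

    fn should_rebroadcast_funding(is_outbound: bool, reached_funding_sent: bool,
        channel_funded: bool, minimum_depth: Option<u32>) -> bool
    {
        if !is_outbound || !reached_funding_sent {
            return false;
        }
        // 0-conf channels (minimum_depth == Some(0)) keep broadcasting even once "funded".
        !(channel_funded && minimum_depth != Some(0))
    }
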
@@ -3593,7 +3635,7 @@ impl<Signer: Sign> Channel<Signer> {
                        self.monitor_pending_commitment_signed = false;
                        return MonitorRestoreUpdates {
                                raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
-                               accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
+                               accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
                        };
                }
 
@@ -3612,7 +3654,7 @@ impl<Signer: Sign> Channel<Signer> {
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
-                       raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
+                       raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
                }
        }
 
@@ -3784,15 +3826,15 @@ impl<Signer: Sign> Channel<Signer> {
                let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger);
 
                if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
-                       // If we're waiting on a monitor update, we shouldn't re-send any funding_locked's.
-                       if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
+                       // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
+                       if self.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
                                        self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                if msg.next_remote_commitment_number != 0 {
-                                       return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned()));
+                                       return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
                                }
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok(ReestablishResponses {
-                                       funding_locked: None,
+                                       channel_ready: None,
                                        raa: None, commitment_update: None, mon_update: None,
                                        order: RAACommitmentOrder::CommitmentFirst,
                                        holding_cell_failed_htlcs: Vec::new(),
@@ -3800,10 +3842,10 @@ impl<Signer: Sign> Channel<Signer> {
                                });
                        }
 
-                       // We have OurFundingLocked set!
+                       // We have OurChannelReady set!
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
                        return Ok(ReestablishResponses {
-                               funding_locked: Some(msgs::FundingLocked {
+                               channel_ready: Some(msgs::ChannelReady {
                                        channel_id: self.channel_id(),
                                        next_per_commitment_point,
                                        short_channel_id_alias: Some(self.outbound_scid_alias),
@@ -3817,7 +3859,7 @@ impl<Signer: Sign> Channel<Signer> {
 
                let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
                        // Remote isn't waiting on any RevokeAndACK from us!
-                       // Note that if we need to repeat our FundingLocked we'll do that in the next if block.
+                       // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
                        None
                } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_holder_commitment_transaction_number {
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
@@ -3836,10 +3878,10 @@ impl<Signer: Sign> Channel<Signer> {
                // the corresponding revoke_and_ack back yet.
                let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 };
 
-               let funding_locked = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
-                       // We should never have to worry about MonitorUpdateFailed resending FundingLocked
+               let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
+                       // We should never have to worry about MonitorUpdateFailed resending ChannelReady
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
-                       Some(msgs::FundingLocked {
+                       Some(msgs::ChannelReady {
                                channel_id: self.channel_id(),
                                next_per_commitment_point,
                                short_channel_id_alias: Some(self.outbound_scid_alias),
@@ -3864,7 +3906,7 @@ impl<Signer: Sign> Channel<Signer> {
                                                panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
                                        Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
                                                Ok(ReestablishResponses {
-                                                       funding_locked, shutdown_msg, announcement_sigs,
+                                                       channel_ready, shutdown_msg, announcement_sigs,
                                                        raa: required_revoke,
                                                        commitment_update: Some(commitment_update),
                                                        order: self.resend_order.clone(),
@@ -3874,7 +3916,7 @@ impl<Signer: Sign> Channel<Signer> {
                                        },
                                        Ok((None, holding_cell_failed_htlcs)) => {
                                                Ok(ReestablishResponses {
-                                                       funding_locked, shutdown_msg, announcement_sigs,
+                                                       channel_ready, shutdown_msg, announcement_sigs,
                                                        raa: required_revoke,
                                                        commitment_update: None,
                                                        order: self.resend_order.clone(),
@@ -3885,7 +3927,7 @@ impl<Signer: Sign> Channel<Signer> {
                                }
                        } else {
                                Ok(ReestablishResponses {
-                                       funding_locked, shutdown_msg, announcement_sigs,
+                                       channel_ready, shutdown_msg, announcement_sigs,
                                        raa: required_revoke,
                                        commitment_update: None,
                                        order: self.resend_order.clone(),
@@ -3903,14 +3945,14 @@ impl<Signer: Sign> Channel<Signer> {
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                self.monitor_pending_commitment_signed = true;
                                Ok(ReestablishResponses {
-                                       funding_locked, shutdown_msg, announcement_sigs,
+                                       channel_ready, shutdown_msg, announcement_sigs,
                                        commitment_update: None, raa: None, mon_update: None,
                                        order: self.resend_order.clone(),
                                        holding_cell_failed_htlcs: Vec::new(),
                                })
                        } else {
                                Ok(ReestablishResponses {
-                                       funding_locked, shutdown_msg, announcement_sigs,
+                                       channel_ready, shutdown_msg, announcement_sigs,
                                        raa: required_revoke,
                                        commitment_update: Some(self.get_last_commitment_update(logger)),
                                        order: self.resend_order.clone(),
@@ -4325,7 +4367,7 @@ impl<Signer: Sign> Channel<Signer> {
                &self.channel_type
        }
 
-       /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
+       /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
        /// is_usable() returns true).
        /// Allowed in any state (including after shutdown)
        pub fn get_short_channel_id(&self) -> Option<u64> {
@@ -4530,7 +4572,7 @@ impl<Signer: Sign> Channel<Signer> {
        /// Allowed in any state (including after shutdown)
        pub fn is_usable(&self) -> bool {
                let mask = ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK;
-               (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_funding_locked
+               (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_channel_ready
        }
 
        /// Returns true if this channel is currently available for use. This is a superset of
@@ -4551,6 +4593,11 @@ impl<Signer: Sign> Channel<Signer> {
                self.channel_state >= ChannelState::FundingSent as u32
        }
 
+       /// Returns true if our channel_ready has been sent
+       pub fn is_our_channel_ready(&self) -> bool {
+               (self.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.channel_state >= ChannelState::ChannelFunded as u32
+       }
+
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
        pub fn received_shutdown(&self) -> bool {
                (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
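
A toy model of the flag test in is_our_channel_ready above; the bit values are assumptions chosen for illustration, not LDK's real ChannelState constants:

    const OUR_CHANNEL_READY: u32 = 1 << 5; // assumed flag bit
    const CHANNEL_FUNDED: u32 = 1 << 6;    // assumed "fully funded" state value
    fn is_our_channel_ready(channel_state: u32) -> bool {
        // Either the flag is still set during the funding phase, or the state has
        // already advanced past it to the funded state.
        (channel_state & OUR_CHANNEL_READY) != 0 || channel_state >= CHANNEL_FUNDED
    }
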
@@ -4580,8 +4627,8 @@ impl<Signer: Sign> Channel<Signer> {
                self.channel_update_status = status;
        }
 
-       fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
-               if self.funding_tx_confirmation_height == 0 {
+       fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+               if self.funding_tx_confirmation_height == 0 && self.minimum_depth != Some(0) {
                        return None;
                }
 
@@ -4596,13 +4643,13 @@ impl<Signer: Sign> Channel<Signer> {
 
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
                let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
-                       self.channel_state |= ChannelState::OurFundingLocked as u32;
+                       self.channel_state |= ChannelState::OurChannelReady as u32;
                        true
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
+               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
                        self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
                        self.update_time_counter += 1;
                        true
-               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+               } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else if self.channel_state < ChannelState::ChannelFunded as u32 {
@@ -4617,14 +4664,14 @@ impl<Signer: Sign> Channel<Signer> {
                                if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
                                        let next_per_commitment_point =
                                                self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.secp_ctx);
-                                       return Some(msgs::FundingLocked {
+                                       return Some(msgs::ChannelReady {
                                                channel_id: self.channel_id,
                                                next_per_commitment_point,
                                                short_channel_id_alias: Some(self.outbound_scid_alias),
                                        });
                                }
                        } else {
-                               self.monitor_pending_funding_locked = true;
+                               self.monitor_pending_channel_ready = true;
                        }
                }
                None
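
A rough sketch of the gate at the top of check_get_channel_ready, as a hypothetical pure function: with zero confirmations, only a 0-conf channel (minimum_depth == Some(0)) may go on to build a channel_ready; otherwise the channel waits until minimum_depth confirmations have been reached.

    fn may_send_channel_ready(funding_tx_confirmation_height: u32, current_height: u32,
        minimum_depth: Option<u32>) -> bool
    {
        if funding_tx_confirmation_height == 0 {
            // Not confirmed at all yet: only the 0-conf case can proceed.
            return minimum_depth == Some(0);
        }
        let confirmations = current_height as i64 - funding_tx_confirmation_height as i64 + 1;
        confirmations >= minimum_depth.unwrap_or(0) as i64
    }
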
@@ -4635,13 +4682,12 @@ impl<Signer: Sign> Channel<Signer> {
        /// In the second, we simply return an Err indicating we need to be force-closed now.
        pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32,
                txdata: &TransactionData, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
-       -> Result<(Option<msgs::FundingLocked>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
-               let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
+       -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
                if let Some(funding_txo) = self.get_funding_txo() {
                        for &(index_in_block, tx) in txdata.iter() {
-                               // If we haven't yet sent a funding_locked, but are in FundingSent (ignoring
-                               // whether they've sent a funding_locked or not), check if we should send one.
-                               if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
+                               // Check if the transaction is the expected funding transaction, and if it is,
+                               // check that it pays the right amount to the right script.
+                               if self.funding_tx_confirmation_height == 0 {
                                        if tx.txid() == funding_txo.txid {
                                                let txo_idx = funding_txo.index as usize;
                                                if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
@@ -4677,13 +4723,13 @@ impl<Signer: Sign> Channel<Signer> {
                                                        }
                                                }
                                        }
-                                       // If we allow 1-conf funding, we may need to check for funding_locked here and
+                                       // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
-                                       if let Some(funding_locked) = self.check_get_funding_locked(height) {
-                                               log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
+                                       if let Some(channel_ready) = self.check_get_channel_ready(height) {
+                                               log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
                                                let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger);
-                                               return Ok((Some(funding_locked), announcement_sigs));
+                                               return Ok((Some(channel_ready), announcement_sigs));
                                        }
                                }
                                for inp in tx.input.iter() {
@@ -4709,12 +4755,12 @@ impl<Signer: Sign> Channel<Signer> {
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
        pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
-       -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
+       -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
                self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_pk)), logger)
        }
 
        fn do_best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_node_pk: Option<(BlockHash, PublicKey)>, logger: &L)
-       -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
+       -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
                let mut timed_out_htlcs = Vec::new();
                // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
                // forward an HTLC when our counterparty should almost certainly just fail it for expiring
@@ -4734,33 +4780,33 @@ impl<Signer: Sign> Channel<Signer> {
 
                self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
 
-               if let Some(funding_locked) = self.check_get_funding_locked(height) {
+               if let Some(channel_ready) = self.check_get_channel_ready(height) {
                        let announcement_sigs = if let Some((genesis_block_hash, node_pk)) = genesis_node_pk {
                                self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger)
                        } else { None };
-                       log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
-                       return Ok((Some(funding_locked), timed_out_htlcs, announcement_sigs));
+                       log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
+                       return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
 
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
                if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
-                  (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
+                  (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
                        let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
                        if self.funding_tx_confirmation_height == 0 {
-                               // Note that check_get_funding_locked may reset funding_tx_confirmation_height to
+                               // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
                                // zero if it has been reorged out, however in either case, our state flags
-                               // indicate we've already sent a funding_locked
+                               // indicate we've already sent a channel_ready
                                funding_tx_confirmations = 0;
                        }
 
-                       // If we've sent funding_locked (or have both sent and received funding_locked), and
+                       // If we've sent channel_ready (or have both sent and received channel_ready), and
                        // the funding transaction has become unconfirmed,
                        // close the channel and hope we can get the latest state on chain (because presumably
                        // the funding transaction is at least still in the mempool of most nodes).
                        //
-                       // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf channel,
-                       // but not doing so may lead to the `ChannelManager::short_to_id` map being
-                       // inconsistent, so we currently have to.
+                       // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
+                       // 0-conf channel, but not doing so may lead to the `ChannelManager::short_to_id` map
+                       // being inconsistent, so we currently have to.
                        if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() {
                                let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
                                        self.minimum_depth.unwrap(), funding_tx_confirmations);
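
The confirmation arithmetic used above, pulled out into a hypothetical helper for clarity: a funding_tx_confirmation_height of 0 means "not currently confirmed", which, once a channel_ready has already gone out, is what feeds the force-close path that follows.

    fn funding_confirmations(current_height: u32, funding_tx_confirmation_height: u32) -> i64 {
        if funding_tx_confirmation_height == 0 {
            // Reorged out (or never confirmed): report zero confirmations.
            return 0;
        }
        current_height as i64 - funding_tx_confirmation_height as i64 + 1
    }
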
@@ -4771,7 +4817,7 @@ impl<Signer: Sign> Channel<Signer> {
                        log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id));
                        // If funding_tx_confirmed_in is unset, the channel must not be active
                        assert!(non_shutdown_state <= ChannelState::ChannelFunded as u32);
-                       assert_eq!(non_shutdown_state & ChannelState::OurFundingLocked as u32, 0);
+                       assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
                        return Err(ClosureReason::FundingTimedOut);
                }
 
@@ -4783,7 +4829,7 @@ impl<Signer: Sign> Channel<Signer> {
 
        /// Indicates the funding transaction is no longer confirmed in the main chain. This may
        /// force-close the channel, but may also indicate a harmless reorganization of a block or two
-       /// before the channel has reached funding_locked and we can just wait for more blocks.
+       /// before the channel has reached channel_ready and we can just wait for more blocks.
        pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
                if self.funding_tx_confirmation_height != 0 {
                        // We handle the funding disconnection by calling best_block_updated with a height one
@@ -4794,8 +4840,8 @@ impl<Signer: Sign> Channel<Signer> {
                        // time we saw and it will be ignored.
                        let best_time = self.update_time_counter;
                        match self.do_best_block_updated(reorg_height, best_time, None, logger) {
-                               Ok((funding_locked, timed_out_htlcs, announcement_sigs)) => {
-                                       assert!(funding_locked.is_none(), "We can't generate a funding with 0 confirmations?");
+                               Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
+                                       assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
                                        assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
                                        Ok(())
@@ -4857,6 +4903,12 @@ impl<Signer: Sign> Channel<Signer> {
                self.inbound_awaiting_accept
        }
 
+       /// Sets this channel to accepting 0conf; must be done before `get_accept_channel`.
+       pub fn set_0conf(&mut self) {
+               assert!(self.inbound_awaiting_accept);
+               self.minimum_depth = Some(0);
+       }
+
        /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
        /// should be sent back to the counterparty node.
        ///
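
A sketch of the ordering requirement documented on set_0conf, using simplified stand-in types (not the real Channel or ChannelManager API): the 0-conf opt-in has to land before the accept_channel message is generated, because the accepted minimum_depth is baked into that message.

    struct PendingInboundChannel { awaiting_accept: bool, minimum_depth: Option<u32> }
    struct AcceptChannel { minimum_depth: u32 }
    impl PendingInboundChannel {
        fn set_0conf(&mut self) {
            assert!(self.awaiting_accept, "must be called before get_accept_channel");
            self.minimum_depth = Some(0);
        }
        fn get_accept_channel(&mut self) -> AcceptChannel {
            self.awaiting_accept = false;
            AcceptChannel { minimum_depth: self.minimum_depth.unwrap_or(6) } // 6 = assumed default
        }
    }
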
@@ -4979,7 +5031,7 @@ impl<Signer: Sign> Channel<Signer> {
        }
 
        /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
-       /// announceable and available for use (have exchanged FundingLocked messages in both
+       /// announceable and available for use (have exchanged ChannelReady messages in both
        /// directions). Should be used for both broadcasted announcements and in response to an
        /// AnnouncementSignatures message from the remote peer.
        ///
@@ -5400,7 +5452,7 @@ impl<Signer: Sign> Channel<Signer> {
                                commitment_txid: counterparty_commitment_txid,
                                htlc_outputs: htlcs.clone(),
                                commitment_number: self.cur_counterparty_commitment_transaction_number,
-                               their_revocation_point: self.counterparty_cur_commitment_point.unwrap()
+                               their_per_commitment_point: self.counterparty_cur_commitment_point.unwrap()
                        }]
                };
                self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
@@ -5619,7 +5671,7 @@ impl<Signer: Sign> Channel<Signer> {
 }
 
 const SERIALIZATION_VERSION: u8 = 2;
-const MIN_SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 2;
 
 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
        (0, FailRelay),
@@ -5684,12 +5736,10 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 
                self.user_id.write(writer)?;
 
-               // Write out the old serialization for the config object. This is read by version-1
-               // deserializers, but we will read the version in the TLV at the end instead.
-               self.config.forwarding_fee_proportional_millionths.write(writer)?;
-               self.config.cltv_expiry_delta.write(writer)?;
-               self.config.announced_channel.write(writer)?;
-               self.config.commit_upfront_shutdown_pubkey.write(writer)?;
+               // Version 1 deserializers expected to read parts of the config object here. Version 2
+               // deserializers (0.0.99) now read config through TLVs, and as we now require them for
+               // `minimum_depth` we simply write dummy values here.
+               writer.write_all(&[0; 8])?;
 
                self.channel_id.write(writer)?;
                (self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
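
A sketch of the compatibility trick above: the legacy fixed-width config fields a version-1 reader would expect here (presumably a u32, a u16, and two bools, i.e. 8 bytes) are replaced by zeroed placeholders, while the live configuration values now travel in the TLV stream at the end of the serialization.

    use std::io::{self, Write};
    // Hypothetical helper mirroring the dummy write above.
    fn write_legacy_config_placeholder<W: Write>(writer: &mut W) -> io::Result<()> {
        writer.write_all(&[0u8; 8])
    }
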
@@ -5821,7 +5871,7 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
                        RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
                }
 
-               self.monitor_pending_funding_locked.write(writer)?;
+               self.monitor_pending_channel_ready.write(writer)?;
                self.monitor_pending_revoke_and_ack.write(writer)?;
                self.monitor_pending_commitment_signed.write(writer)?;
 
@@ -6082,7 +6132,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
                        _ => return Err(DecodeError::InvalidValue),
                };
 
-               let monitor_pending_funding_locked = Readable::read(reader)?;
+               let monitor_pending_channel_ready = Readable::read(reader)?;
                let monitor_pending_revoke_and_ack = Readable::read(reader)?;
                let monitor_pending_commitment_signed = Readable::read(reader)?;
 
@@ -6292,7 +6342,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
 
                        resend_order,
 
-                       monitor_pending_funding_locked,
+                       monitor_pending_channel_ready,
                        monitor_pending_revoke_and_ack,
                        monitor_pending_commitment_signed,
                        monitor_pending_forwards,
@@ -6667,7 +6717,7 @@ mod tests {
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-               let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&logger).unwrap();
+               let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&logger).unwrap();
 
                // Node B --> Node A: funding signed
                let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&logger);
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index cc5590f41f040c2dad067e91d60ba9b77b41f53f..17a52e29e4a5eecb210fe9d3f10c2edc4306f233 100644 (file)
@@ -93,6 +93,8 @@ use util::crypto::sign;
 pub(super) enum PendingHTLCRouting {
        Forward {
                onion_packet: msgs::OnionPacket,
+               /// The SCID from the onion that we should forward to. This could be a "real" SCID, an
+               /// outbound SCID alias, or a phantom node SCID.
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
        },
        Receive {
@@ -136,6 +138,8 @@ pub(super) enum HTLCForwardInfo {
                // `process_pending_htlc_forwards()` for constructing the
                // `HTLCSource::PreviousHopData` for failed and forwarded
                // HTLCs.
+               //
+               // Note that this may be an outbound SCID alias for the associated channel.
                prev_short_channel_id: u64,
                prev_htlc_id: u64,
                prev_funding_outpoint: OutPoint,
@@ -149,6 +153,7 @@ pub(super) enum HTLCForwardInfo {
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
+       // Note that this may be an outbound SCID alias for the associated channel.
        short_channel_id: u64,
        htlc_id: u64,
        incoming_packet_shared_secret: [u8; 32],
@@ -164,7 +169,7 @@ enum OnionPayload {
        Invoice {
                /// This is only here for backwards-compatibility in serialization, in the future it can be
                /// removed, breaking clients running 0.0.106 and earlier.
-               _legacy_hop_data: msgs::FinalOnionHopData,
+               _legacy_hop_data: Option<msgs::FinalOnionHopData>,
        },
        /// Contains the payer-provided preimage.
        Spontaneous(PaymentPreimage),
@@ -414,11 +419,13 @@ pub(super) struct ChannelHolder<Signer: Sign> {
        /// guarantees are made about the existence of a channel with the short id here, nor the short
        /// ids in the PendingHTLCInfo!
        pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
-       /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
+       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+       /// failed/claimed by the user.
+       ///
        /// Note that while this is held in the same mutex as the channels themselves, no consistency
        /// guarantees are made about the channels given here actually existing anymore by the time you
        /// go to read them!
-       claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
+       claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
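
A rough model of the new claimable_htlcs layout using stand-in types (not LDK's real PaymentPurpose or ClaimableHTLC): the payment's purpose is now stored once per payment hash, alongside every HTLC belonging to that payment.

    use std::collections::HashMap;
    type PaymentHash = [u8; 32];
    enum PaymentPurpose { InvoicePayment { payment_secret: [u8; 32] }, SpontaneousPayment }
    struct ClaimableHTLC { amount_msat: u64 }
    type ClaimableHtlcMap = HashMap<PaymentHash, (PaymentPurpose, Vec<ClaimableHTLC>)>;
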
@@ -963,9 +970,25 @@ pub struct ChannelDetails {
        /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
        /// payments instead of this. See [`get_inbound_payment_scid`].
        ///
+       /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
+       /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
+       ///
        /// [`inbound_scid_alias`]: Self::inbound_scid_alias
+       /// [`outbound_scid_alias`]: Self::outbound_scid_alias
        /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
+       /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
+       /// [`confirmations_required`]: Self::confirmations_required
        pub short_channel_id: Option<u64>,
+       /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
+       /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
+       /// the channel has not yet been confirmed (as long as [`confirmations_required`] is
+       /// `Some(0)`).
+       ///
+       /// This will be `None` as long as the channel is not available for routing outbound payments.
+       ///
+       /// [`short_channel_id`]: Self::short_channel_id
+       /// [`confirmations_required`]: Self::confirmations_required
+       pub outbound_scid_alias: Option<u64>,
        /// An optional [`short_channel_id`] alias for this channel, randomly generated by our
        /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
        /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
@@ -1051,18 +1074,18 @@ pub struct ChannelDetails {
        pub force_close_spend_delay: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
-       /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
-       /// channel is not currently being shut down. `funding_locked` message exchange implies the
+       /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
+       /// channel is not currently being shut down. `channel_ready` message exchange implies the
        /// required confirmation count has been reached (and we were connected to the peer at some
        /// point after the funding transaction received enough confirmations). The required
        /// confirmation count is provided in [`confirmations_required`].
        ///
        /// [`confirmations_required`]: ChannelDetails::confirmations_required
-       pub is_funding_locked: bool,
-       /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+       pub is_channel_ready: bool,
+       /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
        /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
-       /// This is a strict superset of `is_funding_locked`.
+       /// This is a strict superset of `is_channel_ready`.
        pub is_usable: bool,
        /// True if this channel is (or will be) publicly-announced.
        pub is_public: bool,
@@ -1083,6 +1106,16 @@ impl ChannelDetails {
        pub fn get_inbound_payment_scid(&self) -> Option<u64> {
                self.inbound_scid_alias.or(self.short_channel_id)
        }
+
+       /// Gets the current SCID which should be used to identify this channel for outbound payments.
+       /// This should be used in [`Route`]s to describe the first hop or in other contexts where
+       /// we're sending or forwarding a payment outbound over this channel.
+       ///
+       /// This is either the [`ChannelDetails::short_channel_id`], if set, or the
+       /// [`ChannelDetails::outbound_scid_alias`]. See those for more information.
+       pub fn get_outbound_payment_scid(&self) -> Option<u64> {
+               self.short_channel_id.or(self.outbound_scid_alias)
+       }
 }
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@ -1281,7 +1314,7 @@ macro_rules! remove_channel {
 }
 
 macro_rules! handle_monitor_err {
-       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateErr::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
@@ -1319,13 +1352,13 @@ macro_rules! handle_monitor_err {
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
+                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
@@ -1333,16 +1366,19 @@ macro_rules! handle_monitor_err {
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
                debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+       };
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new(), Vec::new())
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
        };
 }
 
@@ -1367,13 +1403,13 @@ macro_rules! maybe_break_monitor_err {
        }
 }
 
-macro_rules! send_funding_locked {
-       ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $funding_locked_msg: expr) => {
-               $pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+macro_rules! send_channel_ready {
+       ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
+               $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
                        node_id: $channel.get_counterparty_node_id(),
-                       msg: $funding_locked_msg,
+                       msg: $channel_ready_msg,
                });
-               // Note that we may send a funding locked multiple times for a channel if we reconnect, so
+               // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
                let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
                assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
@@ -1389,7 +1425,7 @@ macro_rules! send_funding_locked {
 macro_rules! handle_chan_restoration_locked {
        ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
         $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
-        $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr, $announcement_sigs: expr) => { {
+        $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
                let mut htlc_forwards = None;
 
                let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
@@ -1398,29 +1434,29 @@ macro_rules! handle_chan_restoration_locked {
                let res = loop {
                        let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
                        if !forwards.is_empty() {
-                               htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+                               htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
                                        $channel_entry.get().get_funding_txo().unwrap(), forwards));
                        }
 
                        if chanmon_update.is_some() {
-                               // On reconnect, we, by definition, only resend a funding_locked if there have been
+                               // On reconnect, we, by definition, only resend a channel_ready if there have been
                                // no commitment updates, so the only channel monitor update which could also be
-                               // associated with a funding_locked would be the funding_created/funding_signed
+                               // associated with a channel_ready would be the funding_created/funding_signed
                                // monitor update. That monitor update failing implies that we won't send
-                               // funding_locked until it's been updated, so we can't have a funding_locked and a
+                               // channel_ready until it's been updated, so we can't have a channel_ready and a
                                // monitor update here (so we don't bother to handle it correctly below).
-                               assert!($funding_locked.is_none());
-                               // A channel monitor update makes no sense without either a funding_locked or a
-                               // commitment update to process after it. Since we can't have a funding_locked, we
+                               assert!($channel_ready.is_none());
+                               // A channel monitor update makes no sense without either a channel_ready or a
+                               // commitment update to process after it. Since we can't have a channel_ready, we
                                // only bother to handle the monitor-update + commitment_update case below.
                                assert!($commitment_update.is_some());
                        }
 
-                       if let Some(msg) = $funding_locked {
-                               // Similar to the above, this implies that we're letting the funding_locked fly
+                       if let Some(msg) = $channel_ready {
+                               // Similar to the above, this implies that we're letting the channel_ready fly
                                // before it should be allowed to.
                                assert!(chanmon_update.is_none());
-                               send_funding_locked!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
+                               send_channel_ready!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
                        }
                        if let Some(msg) = $announcement_sigs {
                                $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -1706,6 +1742,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // `have_received_message` indicates that type negotiation has completed.
                                        channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
                                        short_channel_id: channel.get_short_channel_id(),
+                                       outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
                                        inbound_scid_alias: channel.latest_inbound_scid_alias(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
                                        unspendable_punishment_reserve: to_self_reserve_satoshis,
@@ -1717,7 +1754,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        confirmations_required: channel.minimum_depth(),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
-                                       is_funding_locked: channel.is_usable(),
+                                       is_channel_ready: channel.is_usable(),
                                        is_usable: channel.is_live(),
                                        is_public: channel.should_announce(),
                                        inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
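
For applications, the visible `ChannelDetails` changes above are the new `outbound_scid_alias` field and the `is_funding_locked` → `is_channel_ready` rename. A minimal sketch of consuming them, relying only on the field names shown in this hunk and on `ChannelManager::list_channels()` to obtain the slice:

use lightning::ln::channelmanager::ChannelDetails;

/// Logs readiness and the best-known SCID for each channel, e.g. for the output of
/// `channel_manager.list_channels()`.
fn log_channel_readiness(channels: &[ChannelDetails]) {
    for details in channels {
        // Prefer the real short channel id once the funding tx has confirmed; otherwise
        // fall back to the outbound alias the counterparty can route over (the
        // 0conf/unconfirmed case this change enables).
        let scid = details.short_channel_id.or(details.outbound_scid_alias);
        println!("channel {:02x?}: is_channel_ready={} scid={:?}",
            details.channel_id, details.is_channel_ready, scid);
    }
}
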
@@ -2285,6 +2322,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                action: msgs::ErrorAction::IgnoreError
                        });
                }
+               if chan.get_short_channel_id().is_none() {
+                       return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
+               }
                log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
                self.get_channel_update_for_unicast(chan)
        }
@@ -2296,7 +2336,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// May be called with channel_state already locked!
        fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
                log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
-               let short_channel_id = match chan.get_short_channel_id() {
+               let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                };
@@ -2861,7 +2901,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                });
                                announced_chans = true;
                        } else {
-                               // If the channel is not public or has not yet reached funding_locked, check the
+                               // If the channel is not public or has not yet reached channel_ready, check the
                                // next channel. If we don't yet have any public channels, we'll skip the broadcast
                                // below as peers may not accept it without channels on chain first.
                        }
@@ -3098,7 +3138,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        prev_funding_outpoint } => {
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
                                                                        PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
-                                                                               let _legacy_hop_data = payment_data.clone();
+                                                                               let _legacy_hop_data = Some(payment_data.clone());
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
                                                                        },
                                                                        PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
@@ -3143,8 +3183,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                macro_rules! check_total_value {
                                                                        ($payment_data: expr, $payment_preimage: expr) => {{
                                                                                let mut payment_received_generated = false;
-                                                                               let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
-                                                                                       .or_insert(Vec::new());
+                                                                               let purpose = || {
+                                                                                       events::PaymentPurpose::InvoicePayment {
+                                                                                               payment_preimage: $payment_preimage,
+                                                                                               payment_secret: $payment_data.payment_secret,
+                                                                                       }
+                                                                               };
+                                                                               let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+                                                                                       .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
@@ -3175,11 +3221,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
                                                                                                payment_hash,
-                                                                                               purpose: events::PaymentPurpose::InvoicePayment {
-                                                                                                       payment_preimage: $payment_preimage,
-                                                                                                       payment_secret: $payment_data.payment_secret,
-                                                                                               },
-                                                                                               amt: total_value,
+                                                                                               purpose: purpose(),
+                                                                                               amount_msat: total_value,
                                                                                        });
                                                                                        payment_received_generated = true;
                                                                                } else {
@@ -3216,11 +3259,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        OnionPayload::Spontaneous(preimage) => {
                                                                                                match channel_state.claimable_htlcs.entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
-                                                                                                               e.insert(vec![claimable_htlc]);
+                                                                                                               let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+                                                                                                               e.insert((purpose.clone(), vec![claimable_htlc]));
                                                                                                                new_events.push(events::Event::PaymentReceived {
                                                                                                                        payment_hash,
-                                                                                                                       amt: amt_to_forward,
-                                                                                                                       purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+                                                                                                                       amount_msat: amt_to_forward,
+                                                                                                                       purpose,
                                                                                                                });
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
@@ -3459,7 +3503,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        true
                                });
 
-                               channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                        if htlcs.is_empty() {
                                                // This should be unreachable
                                                debug_assert!(false);
@@ -3496,14 +3540,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
        /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
        /// along the path (including in our own channel on which we received it).
-       /// Returns false if no payment was found to fail backwards, true if the process of failing the
-       /// HTLC backwards has been started.
-       pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
+       ///
+       /// Note that in some cases around unclean shutdown, it is possible the payment may have
+       /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
+       /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment
+       /// may have already been failed automatically by LDK if it was nearing its expiration time.
+       ///
+       /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
+       /// [`ChannelManager::claim_funds`]), you should still monitor for
+       /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
+       /// startup during which time claims that were in-progress at shutdown may be replayed.
+       pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state = Some(self.channel_state.lock().unwrap());
                let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
-               if let Some(mut sources) = removed_source {
+               if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
@@ -3513,8 +3565,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
                                                HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
                        }
-                       true
-               } else { false }
+               }
        }
 
        /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
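
Since `fail_htlc_backwards` no longer reports whether anything was actually failed, the new docs above suggest reconciling rejections against `Event::PaymentClaimed`. A rough sketch of that bookkeeping, where `MyChannelManager` is a hypothetical application-side alias for a fully-parameterized `ChannelManager`:

use std::collections::HashSet;
use lightning::ln::PaymentHash;

struct PaymentPolicy {
    rejected: HashSet<PaymentHash>,
}

impl PaymentPolicy {
    /// Rejects a payment we do not want; after this change there is no return value to
    /// inspect, so the call is best-effort.
    fn reject(&mut self, channel_manager: &MyChannelManager, payment_hash: PaymentHash) {
        channel_manager.fail_htlc_backwards(&payment_hash);
        self.rejected.insert(payment_hash);
    }

    /// Called when an Event::PaymentClaimed is seen: a claim replayed after an unclean
    /// shutdown may complete even for a payment we tried to reject, so reconcile it here.
    fn on_claimed(&mut self, payment_hash: &PaymentHash) {
        if self.rejected.remove(payment_hash) {
            println!("payment {:?} was claimed despite a rejection attempt", payment_hash);
        }
    }
}
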
@@ -3784,26 +3835,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
        /// [`MessageSendEvent`]s needed to claim the payment.
        ///
+       /// Note that calling this method does *not* guarantee that the payment has been claimed. You
+       /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
+       /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+       ///
        /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
        /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
        /// event matches your expectation. If you fail to do so and call this method, you may provide
        /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
        ///
-       /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
-       /// pending for processing via [`get_and_clear_pending_msg_events`].
-       ///
        /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
+       /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
+       /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
        /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
-       pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
+       pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state = Some(self.channel_state.lock().unwrap());
                let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
-               if let Some(mut sources) = removed_source {
+               if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
 
                        // If we are claiming an MPP payment, we have to take special care to ensure that each
@@ -3817,12 +3871,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // we got all the HTLCs and then a channel closed while we were waiting for the user to
                        // provide the preimage, so worrying too much about the optimal handling isn't worth
                        // it.
+                       let mut claimable_amt_msat = 0;
+                       let mut expected_amt_msat = None;
                        let mut valid_mpp = true;
                        for htlc in sources.iter() {
                                if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
                                        valid_mpp = false;
                                        break;
                                }
+                               if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
+                                       log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+                                       debug_assert!(false);
+                                       valid_mpp = false;
+                                       break;
+                               }
+                               expected_amt_msat = Some(htlc.total_msat);
+                               if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
+                                       // We don't currently support MPP for spontaneous payments, so just check
+                                       // that there's one payment here and move on.
+                                       if sources.len() != 1 {
+                                               log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
+                                               debug_assert!(false);
+                                               valid_mpp = false;
+                                               break;
+                                       }
+                               }
+
+                               claimable_amt_msat += htlc.value;
+                       }
+                       if sources.is_empty() || expected_amt_msat.is_none() {
+                               log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
+                               return;
+                       }
+                       if claimable_amt_msat != expected_amt_msat.unwrap() {
+                               log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
+                                       expected_amt_msat.unwrap(), claimable_amt_msat);
+                               return;
                        }
 
                        let mut errs = Vec::new();
@@ -3858,6 +3942,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                        }
 
+                       if claimed_any_htlcs {
+                               self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+                                       payment_hash,
+                                       purpose: payment_purpose,
+                                       amount_msat: claimable_amt_msat,
+                               });
+                       }
+
                        // Now that we've done the entire above loop in one lock, we can handle any errors
                        // which were generated.
                        channel_state.take();
@@ -3866,9 +3958,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                let res: Result<(), _> = Err(err);
                                let _ = handle_error!(self, res, counterparty_node_id);
                        }
-
-                       claimed_any_htlcs
-               } else { false }
+               }
        }
 
        fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
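
For callers, the practical consequence of the hunks above is that `claim_funds` now returns `()` and only the new `Event::PaymentClaimed` confirms the HTLCs were actually claimed. A sketch of an event handler built on the field names introduced in this diff; `MyChannelManager` is a hypothetical alias for a fully-parameterized `ChannelManager`:

use lightning::util::events::{Event, PaymentPurpose};

fn handle_payment_event(channel_manager: &MyChannelManager, event: &Event) {
    match event {
        Event::PaymentReceived { payment_hash, amount_msat, purpose, .. } => {
            println!("received {} msat for payment {:?}", amount_msat, payment_hash);
            match purpose {
                // LDK-generated invoices usually carry the preimage already.
                PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), .. } => {
                    channel_manager.claim_funds(*preimage);
                },
                // Spontaneous (keysend) payments carry their preimage in the onion.
                PaymentPurpose::SpontaneousPayment(preimage) => {
                    channel_manager.claim_funds(*preimage);
                },
                // No preimage available here (e.g. create_inbound_payment_for_hash with an
                // external preimage store); this sketch simply fails the payment back.
                _ => channel_manager.fail_htlc_backwards(payment_hash),
            }
        },
        // Only this event means the HTLCs were claimed; the MessageSendEvents toward the
        // counterparty are generated as a side effect of claim_funds above.
        Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
            println!("claimed {} msat for payment {:?}", amount_msat, payment_hash);
        },
        _ => {},
    }
}
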
@@ -4085,9 +4175,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
-                       let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() {
+                       let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
-                               // funding_locked and the channel is in a usable state. We may re-send a
+                               // channel_ready and the channel is in a usable state. We may re-send a
                                // channel_update later through the announcement_signatures process for public
                                // channels, but there's no reason not to just inform our counterparty of our fees
                                // now.
@@ -4098,7 +4188,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        })
                                } else { None }
                        } else { None };
-                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked, updates.announcement_sigs);
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
@@ -4111,20 +4201,45 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Called to accept a request to open a channel after [`Event::OpenChannelRequest`] has been
-       /// triggered.
+       /// Accepts a request to open a channel after an [`Event::OpenChannelRequest`].
        ///
        /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
        /// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
        /// the channel.
        ///
-       /// For inbound channels, the `user_channel_id` parameter will be provided back in
+       /// The `user_channel_id` parameter will be provided back in
        /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
-       /// with which `accept_inbound_channel` call.
+       /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
        ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
        pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+               self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
+       }
+
+       /// Accepts a request to open a channel after an [`events::Event::OpenChannelRequest`], treating
+       /// it as confirmed immediately.
+       ///
+       /// The `user_channel_id` parameter will be provided back in
+       /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
+       /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
+       ///
+       /// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
+       /// and (if the counterparty agrees) enables forwarding of payments immediately.
+       ///
+       /// This fully trusts that the counterparty has honestly and correctly constructed the funding
+       /// transaction and blindly assumes that it will eventually confirm.
+       ///
+       /// If it does not confirm before we decide to close the channel, or if the funding transaction
+       /// does not pay the correct amount to the correct script, *you will lose funds*.
+       ///
+       /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
+       /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
+       pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+               self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
+       }
+
+       fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
@@ -4137,6 +4252,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if *counterparty_node_id != channel.get().get_counterparty_node_id() {
                                        return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                }
+                               if accept_0conf { channel.get_mut().set_0conf(); }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
                                        msg: channel.get_mut().accept_inbound_channel(user_channel_id),
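
The new `accept_inbound_channel_from_trusted_peer_0conf` has the same signature as `accept_inbound_channel`, so an application only needs a policy for which peers it is willing to trust with 0conf. A sketch under that assumption: manual channel acceptance (`manually_accept_inbound_channels`) is enabled, `MyChannelManager` is a hypothetical alias for a fully-parameterized `ChannelManager`, and `is_trusted` is an application-supplied predicate:

use bitcoin::secp256k1::PublicKey;
use lightning::util::events::Event;

fn on_open_channel_request<F: Fn(&PublicKey) -> bool>(
    channel_manager: &MyChannelManager, event: &Event, is_trusted: F,
) {
    if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
        let res = if is_trusted(counterparty_node_id) {
            // 0conf path: payments can be forwarded immediately, but we fully trust the
            // peer's funding transaction to confirm (see the warning in the docs above).
            channel_manager.accept_inbound_channel_from_trusted_peer_0conf(
                temporary_channel_id, counterparty_node_id, 0)
        } else {
            channel_manager.accept_inbound_channel(temporary_channel_id, counterparty_node_id, 0)
        };
        if let Err(e) = res {
            eprintln!("failed to accept inbound channel: {:?}", e);
        }
    }
}
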
@@ -4228,7 +4344,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-               let ((funding_msg, monitor), mut chan) = {
+               let ((funding_msg, monitor, mut channel_ready), mut chan) = {
                        let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
@@ -4261,9 +4377,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                ChannelMonitorUpdateErr::TemporaryFailure => {
                                        // There's no problem signing a counterparty's funding transaction if our monitor
                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                                       // accepted payment from yet. We do, however, need to wait to send our funding_locked
+                                       // accepted payment from yet. We do, however, need to wait to send our channel_ready
                                        // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new());
+                                       chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+                                       channel_ready = None; // Don't send the channel_ready now
                                },
                        }
                }
@@ -4278,6 +4395,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
+                               if let Some(msg) = channel_ready {
+                                       send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg);
+                               }
                                e.insert(chan);
                        }
                }
@@ -4294,12 +4414,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
+                                       let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
                                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                                        // We weren't able to watch the channel to begin with, so no updates should be made on
                                                        // it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4310,6 +4430,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                }
                                                return res
                                        }
+                                       if let Some(msg) = channel_ready {
+                                               send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg);
+                                       }
                                        funding_tx
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -4320,7 +4443,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       fn internal_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
+       fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(msg.channel_id) {
@@ -4328,7 +4451,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().funding_locked(&msg, self.get_our_node_id(),
+                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
                                        self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan);
                                if let Some(announcement_sigs) = announcement_sigs_opt {
                                        log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
@@ -4660,7 +4783,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                } else {
                                                        if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
                                                                        RAACommitmentOrder::CommitmentFirst, false,
-                                                                       raa_updates.commitment_update.is_some(),
+                                                                       raa_updates.commitment_update.is_some(), false,
                                                                        raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
                                                                        raa_updates.finalized_claimed_htlcs) {
                                                                break Err(e);
@@ -4676,7 +4799,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
                                                        raa_updates.finalized_claimed_htlcs,
                                                        chan.get().get_short_channel_id()
-                                                               .expect("RAA should only work on a short-id-available channel"),
+                                                               .unwrap_or(chan.get().outbound_scid_alias()),
                                                        chan.get().get_funding_txo().unwrap()))
                                },
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -4812,7 +4935,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
                                        chan_restoration_res = handle_chan_restoration_locked!(
                                                self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
-                                               responses.mon_update, Vec::new(), None, responses.funding_locked, responses.announcement_sigs);
+                                               responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
                                        if let Some(upd) = channel_update {
                                                channel_state.pending_msg_events.push(upd);
                                        }
@@ -4824,8 +4947,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
 
-               if let Some(funding_locked_msg) = need_lnd_workaround {
-                       self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
+               if let Some(channel_ready_msg) = need_lnd_workaround {
+                       self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
                Ok(())
        }
@@ -4841,7 +4964,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        MonitorEvent::HTLCEvent(htlc_update) => {
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
@@ -5462,7 +5585,7 @@ where
        /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
        /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
        /// the function.
-       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
                        (&self, height_opt: Option<u32>, f: FN) {
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // during initialization prior to the chain_monitor being fully configured in some cases.
@@ -5477,17 +5600,17 @@ where
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                let res = f(channel);
-                               if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+                               if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
                                                }));
                                        }
-                                       if let Some(funding_locked) = funding_locked_opt {
-                                               send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
+                                       if let Some(channel_ready) = channel_ready_opt {
+                                               send_channel_ready!(short_to_id, pending_msg_events, channel, channel_ready);
                                                if channel.is_usable() {
-                                                       log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
                                                        if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                        node_id: channel.get_counterparty_node_id(),
@@ -5495,7 +5618,7 @@ where
                                                                });
                                                        }
                                                } else {
-                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+                                                       log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
                                                }
                                        }
                                        if let Some(announcement_sigs) = announcement_sigs {
@@ -5515,6 +5638,19 @@ where
                                                        }
                                                }
                                        }
+                                       if channel.is_our_channel_ready() {
+                                               if let Some(real_scid) = channel.get_short_channel_id() {
+                                                       // If we sent a 0conf channel_ready, and now have an SCID, we add it
+                                                       // to the short_to_id map here. Note that we check whether we can relay
+                                                       // using the real SCID at relay-time (i.e. enforce option_scid_alias
+                                                       // then), and if the funding tx is ever un-confirmed we force-close the
+                                                       // channel, ensuring short_to_id is always consistent.
+                                                       let scid_insert = short_to_id.insert(real_scid, channel.channel_id());
+                                                       assert!(scid_insert.is_none() || scid_insert.unwrap() == channel.channel_id(),
+                                                               "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+                                                               fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+                                               }
+                                       }
                                } else if let Err(reason) = res {
                                        update_maps_on_chan_removal!(self, short_to_id, channel);
                                        // It looks like our counterparty went on-chain or funding transaction was
@@ -5540,7 +5676,7 @@ where
                        });
 
                        if let Some(height) = height_opt {
-                               channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                        htlcs.retain(|htlc| {
                                                // If height is approaching the number of blocks we think it takes us to get
                                                // our commitment transaction confirmed before the HTLC expires, plus the
@@ -5629,9 +5765,9 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
        }
 
-       fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+       fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
+               let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
@@ -5729,7 +5865,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != counterparty_node_id,
+                                       &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
@@ -5927,6 +6063,7 @@ impl_writeable_tlv_based!(ChannelDetails, {
        (2, channel_id, required),
        (3, channel_type, option),
        (4, counterparty, required),
+       (5, outbound_scid_alias, option),
        (6, funding_txo, option),
        (8, short_channel_id, option),
        (10, channel_value_satoshis, required),
@@ -5941,7 +6078,7 @@ impl_writeable_tlv_based!(ChannelDetails, {
        (22, confirmations_required, option),
        (24, force_close_spend_delay, option),
        (26, is_outbound, required),
-       (28, is_funding_locked, required),
+       (28, is_channel_ready, required),
        (30, is_usable, required),
        (32, is_public, required),
        (33, inbound_htlc_minimum_msat, option),
@@ -6061,13 +6198,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 
 impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               let payment_data = match &self.onion_payload {
-                       OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
-                       _ => None,
-               };
-               let keysend_preimage = match self.onion_payload {
-                       OnionPayload::Invoice { .. } => None,
-                       OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+               let (payment_data, keysend_preimage) = match &self.onion_payload {
+                       OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+                       OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
                };
                write_tlv_fields!(writer, {
                        (0, self.prev_hop, required),
@@ -6108,13 +6241,13 @@ impl Readable for ClaimableHTLC {
                                OnionPayload::Spontaneous(p)
                        },
                        None => {
-                               if payment_data.is_none() {
-                                       return Err(DecodeError::InvalidValue)
-                               }
                                if total_msat.is_none() {
+                                       if payment_data.is_none() {
+                                               return Err(DecodeError::InvalidValue)
+                                       }
                                        total_msat = Some(payment_data.as_ref().unwrap().total_msat);
                                }
-                               OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
+                               OnionPayload::Invoice { _legacy_hop_data: payment_data }
                        },
                };
                Ok(Self {
@@ -6287,13 +6420,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        }
                }
 
+               let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
                (channel_state.claimable_htlcs.len() as u64).write(writer)?;
-               for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
+               for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
                                htlc.write(writer)?;
                        }
+                       htlc_purposes.push(purpose);
                }
 
                let per_peer_state = self.per_peer_state.write().unwrap();
@@ -6370,6 +6505,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        (3, pending_outbound_payments, required),
                        (5, self.our_network_pubkey, required),
                        (7, self.fake_scid_rand_bytes, required),
+                       (9, htlc_purposes, vec_type),
                });
 
                Ok(())
@@ -6588,15 +6724,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let claimable_htlcs_count: u64 = Readable::read(reader)?;
-               let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
+               let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
                for _ in 0..claimable_htlcs_count {
                        let payment_hash = Readable::read(reader)?;
                        let previous_hops_len: u64 = Readable::read(reader)?;
                        let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
                        for _ in 0..previous_hops_len {
-                               previous_hops.push(Readable::read(reader)?);
+                               previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
                        }
-                       claimable_htlcs.insert(payment_hash, previous_hops);
+                       claimable_htlcs_list.push((payment_hash, previous_hops));
                }
 
                let peer_count: u64 = Readable::read(reader)?;
@@ -6666,11 +6802,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut pending_outbound_payments = None;
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
+               let mut claimable_htlc_purposes = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (3, pending_outbound_payments, option),
                        (5, received_network_pubkey, option),
                        (7, fake_scid_rand_bytes, option),
+                       (9, claimable_htlc_purposes, vec_type),
                });
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
@@ -6693,7 +6831,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        // payments which are still in-flight via their on-chain state.
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
-                       for (_, monitor) in args.channel_monitors {
+                       for (_, monitor) in args.channel_monitors.iter() {
                                if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
                                        for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
@@ -6731,6 +6869,49 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
+               let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+               let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+
+               let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
+               if let Some(mut purposes) = claimable_htlc_purposes {
+                       if purposes.len() != claimable_htlcs_list.len() {
+                               return Err(DecodeError::InvalidValue);
+                       }
+                       for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
+                               claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+                       }
+               } else {
+                       // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
+                       // include a `_legacy_hop_data` in the `OnionPayload`.
+                       for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
+                               if previous_hops.is_empty() {
+                                       return Err(DecodeError::InvalidValue);
+                               }
+                               let purpose = match &previous_hops[0].onion_payload {
+                                       OnionPayload::Invoice { _legacy_hop_data } => {
+                                               if let Some(hop_data) = _legacy_hop_data {
+                                                       events::PaymentPurpose::InvoicePayment {
+                                                               payment_preimage: match pending_inbound_payments.get(&payment_hash) {
+                                                                       Some(inbound_payment) => inbound_payment.payment_preimage,
+                                                                       None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
+                                                                               Ok(payment_preimage) => payment_preimage,
+                                                                               Err(()) => {
+                                                                                       log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
+                                                                                       return Err(DecodeError::InvalidValue);
+                                                                               }
+                                                                       }
+                                                               },
+                                                               payment_secret: hop_data.payment_secret,
+                                                       }
+                                               } else { return Err(DecodeError::InvalidValue); }
+                                       },
+                                       OnionPayload::Spontaneous(payment_preimage) =>
+                                               events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
+                               };
+                               claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+                       }
+               }
+
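The restoration logic above pairs the new optional TLV field (9) one-to-one with the legacy claimable-HTLC list and rejects the serialized data if the lengths disagree. A standalone sketch of that pairing step, using generic placeholder types rather than the LDK ones:

// Sketch: pair an optional, parallel list of purposes with the legacy
// (payment_hash, htlcs) entries, failing the decode on a length mismatch.
fn pair_purposes<P, K, V>(mut purposes: Vec<P>, mut legacy: Vec<(K, V)>)
	-> Result<Vec<(K, (P, V))>, ()>
{
	if purposes.len() != legacy.len() { return Err(()); }
	Ok(purposes.drain(..).zip(legacy.drain(..))
		.map(|(purpose, (key, htlcs))| (key, (purpose, htlcs)))
		.collect())
}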
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
 
@@ -6776,8 +6957,46 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
-               let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
-               let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+               for (_, monitor) in args.channel_monitors.iter() {
+                       for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
+                               if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
+                                       log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
+                                       let mut claimable_amt_msat = 0;
+                                       for claimable_htlc in claimable_htlcs {
+                                               claimable_amt_msat += claimable_htlc.value;
+
+                                               // Add a holding-cell claim of the payment to the Channel, which should be
+                                               // applied ~immediately on peer reconnection. Because it won't generate a
+                                               // new commitment transaction we can just provide the payment preimage to
+                                               // the corresponding ChannelMonitor and nothing else.
+                                               //
+                                               // We do so directly instead of via the normal ChannelMonitor update
+                                               // procedure as the ChainMonitor hasn't yet been initialized, implying
+                                               // we're not allowed to call it directly yet. Further, we do the update
+                                               // without incrementing the ChannelMonitor update ID as there isn't any
+                                               // reason to.
+                                               // If we were to generate a new ChannelMonitor update ID here and then
+                                               // crash before the user finishes block connect we'd end up force-closing
+                                               // this channel as well. On the flip side, there's no harm in restarting
+                                               // without the new monitor persisted - we'll end up right back here on
+                                               // restart.
+                                               let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+                                               if let Some(channel) = by_id.get_mut(&previous_channel_id) {
+                                                       channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+                                               }
+                                               if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger);
+                                               }
+                                       }
+                                       pending_events_read.push(events::Event::PaymentClaimed {
+                                               payment_hash,
+                                               purpose: payment_purpose,
+                                               amount_msat: claimable_amt_msat,
+                                       });
+                               }
+                       }
+               }
+
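Because the block above pushes `events::Event::PaymentClaimed` into the pending events, a node restored from disk after having released a preimage to a ChannelMonitor surfaces the claim through its normal event handling. A hedged sketch of what that looks like on the user side; the `handle_event` wrapper is hypothetical, only the event variant and its fields come from this change:

// Hypothetical user-side handler body (sketch).
fn handle_event(event: lightning::util::events::Event) {
	if let lightning::util::events::Event::PaymentClaimed { payment_hash, amount_msat, .. } = event {
		// `purpose` is also available on the event if the preimage/secret is needed.
		println!("claimed {} msat for payment {:?}", amount_msat, payment_hash.0);
	}
}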
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
@@ -7031,8 +7250,10 @@ mod tests {
                // claim_funds_along_route because the ordering of the messages causes the second half of the
                // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
                // lightning messages manually.
-               assert!(nodes[1].node.claim_funds(payment_preimage));
+               nodes[1].node.claim_funds(payment_preimage);
+               expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
                check_added_monitors!(nodes[1], 2);
+
                let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
@@ -7431,12 +7652,12 @@ pub mod bench {
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
 
-               node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
+               node_a.handle_channel_ready(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
                let msg_events = node_a.get_and_clear_pending_msg_events();
                assert_eq!(msg_events.len(), 2);
                match msg_events[0] {
-                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
-                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                       MessageSendEvent::SendChannelReady { ref msg, .. } => {
+                               node_b.handle_channel_ready(&node_a.get_our_node_id(), msg);
                                get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
                        },
                        _ => panic!(),
@@ -7478,7 +7699,8 @@ pub mod bench {
 
                                expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
                                expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
-                               assert!($node_b.claim_funds(payment_preimage));
+                               $node_b.claim_funds(payment_preimage);
+                               expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
 
                                match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
                                        MessageSendEvent::UpdateHTLCs { node_id, updates } => {
index c0e33e5b93a51a4d620f6c19eec016a15468f70e..8929a9774381e9342f20790e094e51c64b36d959 100644 (file)
@@ -21,6 +21,7 @@ use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
 use util::enforcing_trait_impls::EnforcingSigner;
+use util::scid_utils;
 use util::test_utils;
 use util::test_utils::{panicking, TestChainMonitor};
 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
@@ -34,6 +35,8 @@ use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::network::constants::Network;
 
 use bitcoin::hash_types::BlockHash;
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::Hash as _;
 
 use bitcoin::secp256k1::PublicKey;
 
@@ -48,9 +51,13 @@ pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
 /// Mine the given transaction in the next block and then mine CHAN_CONFIRM_DEPTH - 1 blocks on
 /// top, giving the given transaction CHAN_CONFIRM_DEPTH confirmations.
-pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
-       confirm_transaction_at(node, tx, node.best_block_info().1 + 1);
+///
+/// Returns the SCID a channel confirmed in the given transaction will have, assuming the funding
+/// output is the 1st output in the transaction.
+pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> u64 {
+       let scid = confirm_transaction_at(node, tx, node.best_block_info().1 + 1);
        connect_blocks(node, CHAN_CONFIRM_DEPTH - 1);
+       scid
 }
 /// Mine a single block containing the given transaction
 pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
@@ -59,7 +66,10 @@ pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transac
 }
 /// Mine the given transaction at the given height, mining blocks as required to build to that
 /// height
-pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
+///
+/// Returns the SCID a channel confirmed in the given transaction will have, assuming the funding
+/// output is the 1st output in the transaction.
+pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) -> u64 {
        let first_connect_height = node.best_block_info().1 + 1;
        assert!(first_connect_height <= conf_height);
        if conf_height > first_connect_height {
@@ -74,31 +84,64 @@ pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &T
        }
        block.txdata.push(tx.clone());
        connect_block(node, &block);
+       scid_utils::scid_from_parts(conf_height as u64, block.txdata.len() as u64 - 1, 0).unwrap()
 }
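`scid_from_parts` packs the confirmation height, the transaction's index within the block, and the funding output index into a BOLT 7 short channel id (3 bytes of block height, 3 bytes of tx index, 2 bytes of output index). A standalone sketch of that layout, not the `scid_utils` helper itself:

// Sketch: compose a short channel id from its parts (BOLT 7 layout).
fn scid_sketch(block_height: u64, tx_index: u64, vout: u64) -> u64 {
	assert!(block_height < (1 << 24) && tx_index < (1 << 24) && vout < (1 << 16));
	(block_height << 40) | (tx_index << 16) | vout
}
// e.g. a channel confirmed at height 100 in the block's second transaction,
// funding output 0: scid_sketch(100, 1, 0)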
 
 /// The possible ways we may notify a ChannelManager of a new block
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum ConnectStyle {
-       /// Calls best_block_updated first, detecting transactions in the block only after receiving the
-       /// header and height information.
+       /// Calls `best_block_updated` first, detecting transactions in the block only after receiving
+       /// the header and height information.
        BestBlockFirst,
-       /// The same as BestBlockFirst, however when we have multiple blocks to connect, we only
-       /// make a single best_block_updated call.
+       /// The same as `BestBlockFirst`, however when we have multiple blocks to connect, we only
+       /// make a single `best_block_updated` call.
        BestBlockFirstSkippingBlocks,
-       /// Calls transactions_confirmed first, detecting transactions in the block before updating the
-       /// header and height information.
+       /// The same as `BestBlockFirst` when connecting blocks. During disconnection only
+       /// `transaction_unconfirmed` is called.
+       BestBlockFirstReorgsOnlyTip,
+       /// Calls `transactions_confirmed` first, detecting transactions in the block before updating
+       /// the header and height information.
        TransactionsFirst,
-       /// The same as TransactionsFirst, however when we have multiple blocks to connect, we only
-       /// make a single best_block_updated call.
+       /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
+       /// make a single `best_block_updated` call.
        TransactionsFirstSkippingBlocks,
-       /// Provides the full block via the chain::Listen interface. In the current code this is
-       /// equivalent to TransactionsFirst with some additional assertions.
+       /// The same as `TransactionsFirst` when connecting blocks. During disconnection only
+       /// `transaction_unconfirmed` is called.
+       TransactionsFirstReorgsOnlyTip,
+       /// Provides the full block via the `chain::Listen` interface. In the current code this is
+       /// equivalent to `TransactionsFirst` with some additional assertions.
        FullBlockViaListen,
 }
 
+impl ConnectStyle {
+       fn random_style() -> ConnectStyle {
+               #[cfg(feature = "std")] {
+                       use core::hash::{BuildHasher, Hasher};
+                       // Get a random value using the only std API to do so - the DefaultHasher
+                       let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+                       let res = match rand_val % 7 {
+                               0 => ConnectStyle::BestBlockFirst,
+                               1 => ConnectStyle::BestBlockFirstSkippingBlocks,
+                               2 => ConnectStyle::BestBlockFirstReorgsOnlyTip,
+                               3 => ConnectStyle::TransactionsFirst,
+                               4 => ConnectStyle::TransactionsFirstSkippingBlocks,
+                               5 => ConnectStyle::TransactionsFirstReorgsOnlyTip,
+                               6 => ConnectStyle::FullBlockViaListen,
+                               _ => unreachable!(),
+                       };
+                       eprintln!("Using Block Connection Style: {:?}", res);
+                       res
+               }
+               #[cfg(not(feature = "std"))] {
+                       ConnectStyle::FullBlockViaListen
+               }
+       }
+}
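With `create_network` now picking a random `ConnectStyle` per run (see below), tests that depend on a specific block-connection order pin the style explicitly, as this diff itself does later for `test_justice_tx`:

	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	*nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;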
+
 pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
        let skip_intermediaries = match *node.connect_style.borrow() {
-               ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => true,
+               ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks|
+                       ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::TransactionsFirstReorgsOnlyTip => true,
                _ => false,
        };
 
@@ -109,18 +152,20 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
        };
        assert!(depth >= 1);
        for i in 1..depth {
-               do_connect_block(node, &block, skip_intermediaries);
+               let prev_blockhash = block.header.block_hash();
+               do_connect_block(node, block, skip_intermediaries);
                block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
        }
-       connect_block(node, &block);
-       block.header.block_hash()
+       let hash = block.header.block_hash();
+       do_connect_block(node, block, false);
+       hash
 }
 
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
-       do_connect_block(node, block, false);
+       do_connect_block(node, block.clone(), false);
 }
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
@@ -130,20 +175,23 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
        }
 }
 
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, skip_intermediaries: bool) {
+fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
        call_claimable_balances(node);
        let height = node.best_block_info().1 + 1;
+       #[cfg(feature = "std")] {
+               eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
+       }
        if !skip_intermediaries {
                let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
                match *node.connect_style.borrow() {
-                       ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks => {
+                       ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::BestBlockFirstReorgsOnlyTip => {
                                node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
                                call_claimable_balances(node);
                                node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
                                node.node.best_block_updated(&block.header, height);
                                node.node.transactions_confirmed(&block.header, &txdata, height);
                        },
-                       ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks => {
+                       ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks|ConnectStyle::TransactionsFirstReorgsOnlyTip => {
                                node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
                                call_claimable_balances(node);
                                node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
@@ -158,30 +206,39 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, s
        }
        call_claimable_balances(node);
        node.node.test_process_background_events();
-       node.blocks.lock().unwrap().push((block.header, height));
+       node.blocks.lock().unwrap().push((block, height));
 }
 
 pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
        call_claimable_balances(node);
+       #[cfg(feature = "std")] {
+               eprintln!("Disconnecting {} blocks using Block Connection Style: {:?}", count, *node.connect_style.borrow());
+       }
        for i in 0..count {
-               let orig_header = node.blocks.lock().unwrap().pop().unwrap();
-               assert!(orig_header.1 > 0); // Cannot disconnect genesis
-               let prev_header = node.blocks.lock().unwrap().last().unwrap().clone();
+               let orig = node.blocks.lock().unwrap().pop().unwrap();
+               assert!(orig.1 > 0); // Cannot disconnect genesis
+               let prev = node.blocks.lock().unwrap().last().unwrap().clone();
 
                match *node.connect_style.borrow() {
                        ConnectStyle::FullBlockViaListen => {
-                               node.chain_monitor.chain_monitor.block_disconnected(&orig_header.0, orig_header.1);
-                               Listen::block_disconnected(node.node, &orig_header.0, orig_header.1);
+                               node.chain_monitor.chain_monitor.block_disconnected(&orig.0.header, orig.1);
+                               Listen::block_disconnected(node.node, &orig.0.header, orig.1);
                        },
                        ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
                                if i == count - 1 {
-                                       node.chain_monitor.chain_monitor.best_block_updated(&prev_header.0, prev_header.1);
-                                       node.node.best_block_updated(&prev_header.0, prev_header.1);
+                                       node.chain_monitor.chain_monitor.best_block_updated(&prev.0.header, prev.1);
+                                       node.node.best_block_updated(&prev.0.header, prev.1);
+                               }
+                       },
+                       ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::TransactionsFirstReorgsOnlyTip => {
+                               for tx in orig.0.txdata {
+                                       node.chain_monitor.chain_monitor.transaction_unconfirmed(&tx.txid());
+                                       node.node.transaction_unconfirmed(&tx.txid());
                                }
                        },
                        _ => {
-                               node.chain_monitor.chain_monitor.best_block_updated(&prev_header.0, prev_header.1);
-                               node.node.best_block_updated(&prev_header.0, prev_header.1);
+                               node.chain_monitor.chain_monitor.best_block_updated(&prev.0.header, prev.1);
+                               node.node.best_block_updated(&prev.0.header, prev.1);
                        },
                }
                call_claimable_balances(node);
@@ -227,7 +284,7 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> {
        pub network_payment_count: Rc<RefCell<u8>>,
        pub network_chan_count: Rc<RefCell<u32>>,
        pub logger: &'c test_utils::TestLogger,
-       pub blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>,
+       pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
        pub connect_style: Rc<RefCell<ConnectStyle>>,
 }
 impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
@@ -238,7 +295,7 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
                self.blocks.lock().unwrap().last().map(|(a, b)| (a.block_hash(), *b)).unwrap()
        }
        pub fn get_block_header(&self, height: u32) -> BlockHeader {
-               self.blocks.lock().unwrap()[height as usize].0
+               self.blocks.lock().unwrap()[height as usize].0.header
        }
 }
 
@@ -343,8 +400,8 @@ pub fn create_chan_between_nodes<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, n
 }
 
 pub fn create_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
-       let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
+       let (channel_ready, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
+       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &channel_ready);
        (announcement, as_update, bs_update, channel_id, tx)
 }
 
@@ -628,10 +685,10 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'c>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
        confirm_transaction_at(node_conf, tx, conf_height);
        connect_blocks(node_conf, CHAN_CONFIRM_DEPTH - 1);
-       node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id()));
+       node_recv.node.handle_channel_ready(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendChannelReady, node_recv.node.get_our_node_id()));
 }
 
-pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) {
        let channel_id;
        let events_6 = node_conf.node.get_and_clear_pending_msg_events();
        assert_eq!(events_6.len(), 3);
@@ -643,7 +700,7 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv
                1
        } else { panic!("Unexpected event: {:?}", events_6[1]); };
        ((match events_6[0] {
-               MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+               MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
                        channel_id = msg.channel_id.clone();
                        assert_eq!(*node_id, node_recv.node.get_our_node_id());
                        msg.clone()
@@ -658,7 +715,7 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv
        }), channel_id)
 }
 
-pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) {
        let conf_height = core::cmp::max(node_a.best_block_info().1 + 1, node_b.best_block_info().1 + 1);
        create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height);
        confirm_transaction_at(node_a, tx, conf_height);
@@ -666,14 +723,14 @@ pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a
        create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 }
 
-pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
        let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
        let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
        (msgs, chan_id, tx)
 }
 
-pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
-       node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0);
+pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, as_funding_msgs: &(msgs::ChannelReady, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
+       node_b.node.handle_channel_ready(&node_a.node.get_our_node_id(), &as_funding_msgs.0);
        let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
        node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1);
 
@@ -714,7 +771,7 @@ pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a
        (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
 }
 
-pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::FundingLocked, Transaction) {
+pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelReady, Transaction) {
        let mut no_announce_cfg = test_default_channel_config();
        no_announce_cfg.channel_options.announced_channel = false;
        nodes[a].node.create_channel(nodes[b].node.get_our_node_id(), channel_value, push_msat, 42, Some(no_announce_cfg)).unwrap();
@@ -737,10 +794,10 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &
        connect_blocks(&nodes[a], CHAN_CONFIRM_DEPTH - 1);
        confirm_transaction_at(&nodes[b], &tx, conf_height);
        connect_blocks(&nodes[b], CHAN_CONFIRM_DEPTH - 1);
-       let as_funding_locked = get_event_msg!(nodes[a], MessageSendEvent::SendFundingLocked, nodes[b].node.get_our_node_id());
-       nodes[a].node.handle_funding_locked(&nodes[b].node.get_our_node_id(), &get_event_msg!(nodes[b], MessageSendEvent::SendFundingLocked, nodes[a].node.get_our_node_id()));
+       let as_channel_ready = get_event_msg!(nodes[a], MessageSendEvent::SendChannelReady, nodes[b].node.get_our_node_id());
+       nodes[a].node.handle_channel_ready(&nodes[b].node.get_our_node_id(), &get_event_msg!(nodes[b], MessageSendEvent::SendChannelReady, nodes[a].node.get_our_node_id()));
        let as_update = get_event_msg!(nodes[a], MessageSendEvent::SendChannelUpdate, nodes[b].node.get_our_node_id());
-       nodes[b].node.handle_funding_locked(&nodes[a].node.get_our_node_id(), &as_funding_locked);
+       nodes[b].node.handle_channel_ready(&nodes[a].node.get_our_node_id(), &as_channel_ready);
        let bs_update = get_event_msg!(nodes[b], MessageSendEvent::SendChannelUpdate, nodes[a].node.get_our_node_id());
 
        nodes[a].node.handle_channel_update(&nodes[b].node.get_our_node_id(), &bs_update);
@@ -748,7 +805,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &
 
        let mut found_a = false;
        for chan in nodes[a].node.list_usable_channels() {
-               if chan.channel_id == as_funding_locked.channel_id {
+               if chan.channel_id == as_channel_ready.channel_id {
                        assert!(!found_a);
                        found_a = true;
                        assert!(!chan.is_public);
@@ -758,7 +815,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &
 
        let mut found_b = false;
        for chan in nodes[b].node.list_usable_channels() {
-               if chan.channel_id == as_funding_locked.channel_id {
+               if chan.channel_id == as_channel_ready.channel_id {
                        assert!(!found_b);
                        found_b = true;
                        assert!(!chan.is_public);
@@ -766,7 +823,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &
        }
        assert!(found_b);
 
-       (as_funding_locked, tx)
+       (as_channel_ready, tx)
 }
 
 pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, ann: &msgs::ChannelAnnouncement, upd_1: &msgs::ChannelUpdate, upd_2: &msgs::ChannelUpdate) {
@@ -1250,9 +1307,9 @@ macro_rules! expect_payment_received {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
-                       $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+                       $crate::util::events::Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                                assert_eq!($expected_payment_hash, *payment_hash);
-                               assert_eq!($expected_recv_value, amt);
+                               assert_eq!($expected_recv_value, amount_msat);
                                match purpose {
                                        $crate::util::events::PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                                assert_eq!(&$expected_payment_preimage, payment_preimage);
@@ -1266,6 +1323,22 @@ macro_rules! expect_payment_received {
        }
 }
 
+#[macro_export]
+#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+macro_rules! expect_payment_claimed {
+       ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       $crate::util::events::Event::PaymentClaimed { ref payment_hash, amount_msat, .. } => {
+                               assert_eq!($expected_payment_hash, *payment_hash);
+                               assert_eq!($expected_recv_value, amount_msat);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+}
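Since `claim_funds` no longer returns a bool, tests assert the claim via the new `expect_payment_claimed!` macro immediately after calling it, as in the updated ChannelManager test earlier in this diff:

	nodes[1].node.claim_funds(payment_preimage);
	expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
	check_added_monitors!(nodes[1], 2);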
+
 #[cfg(test)]
 #[macro_export]
 macro_rules! expect_payment_sent_without_paths {
@@ -1476,7 +1549,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>,
        payment_id
 }
 
-pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option<PaymentPreimage>) {
+pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_received_expected: bool, clear_recipient_events: bool, expected_preimage: Option<PaymentPreimage>) {
        let mut payment_event = SendEvent::from_event(ev);
        let mut prev_node = origin_node;
 
@@ -1489,12 +1562,12 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path
 
                expect_pending_htlcs_forwardable!(node);
 
-               if idx == expected_path.len() - 1 {
+               if idx == expected_path.len() - 1 && clear_recipient_events {
                        let events_2 = node.node.get_and_clear_pending_events();
                        if payment_received_expected {
                                assert_eq!(events_2.len(), 1);
                                match events_2[0] {
-                                       Event::PaymentReceived { ref payment_hash, ref purpose, amt} => {
+                                       Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                                                assert_eq!(our_payment_hash, *payment_hash);
                                                match &purpose {
                                                        PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
@@ -1506,14 +1579,14 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path
                                                                assert!(our_payment_secret.is_none());
                                                        },
                                                }
-                                               assert_eq!(amt, recv_value);
+                                               assert_eq!(amount_msat, recv_value);
                                        },
                                        _ => panic!("Unexpected event"),
                                }
                        } else {
                                assert!(events_2.is_empty());
                        }
-               } else {
+               } else if idx != expected_path.len() - 1 {
                        let mut events_2 = node.node.get_and_clear_pending_msg_events();
                        assert_eq!(events_2.len(), 1);
                        check_added_monitors!(node, 1);
@@ -1525,6 +1598,10 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path
        }
 }
 
+pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option<PaymentPreimage>) {
+       do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_received_expected, true, expected_preimage);
+}
+
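The new `clear_recipient_events` flag lets a caller keep the recipient's pending events around for its own assertions; passing `false` skips the `PaymentReceived` check entirely. A hedged usage sketch with illustrative variable names:

	// Deliver the HTLC but leave the recipient's events untouched so the test
	// can inspect them itself afterwards.
	do_pass_along_path(&nodes[0], &[&nodes[1]], 10_000, payment_hash, Some(payment_secret), ev, true, false, None);
	let recipient_events = nodes[1].node.get_and_clear_pending_events();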
 pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) {
        let mut events = origin_node.node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), expected_route.len());
@@ -1546,7 +1623,19 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>,
        for path in expected_paths.iter() {
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
-       assert!(expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage));
+       expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
+
+       let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
+       assert_eq!(claim_event.len(), 1);
+       match claim_event[0] {
+               Event::PaymentClaimed { purpose: PaymentPurpose::SpontaneousPayment(preimage), .. }|
+               Event::PaymentClaimed { purpose: PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), ..}, .. } =>
+                       assert_eq!(preimage, our_payment_preimage),
+               Event::PaymentClaimed { purpose: PaymentPurpose::InvoicePayment { .. }, payment_hash, .. } =>
+                       assert_eq!(&payment_hash.0, &Sha256::hash(&our_payment_preimage.0)[..]),
+               _ => panic!(),
+       }
+
        check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());
 
        let mut expected_total_fee_msat = 0;
@@ -1641,7 +1730,7 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>,
        }
 
        // Ensure that claim_funds is idempotent.
-       assert!(!expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage));
+       expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
        assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(expected_paths[0].last().unwrap(), 0);
 
@@ -1701,13 +1790,18 @@ pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&No
        claim_payment(&origin, expected_route, our_payment_preimage);
 }
 
-pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash)  {
-       let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect();
+pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) {
        for path in expected_paths.iter() {
                assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
        }
-       assert!(expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
+       expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
        expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap());
+
+       pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash);
+}
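Splitting out `pass_failed_payment_back` means a test can trigger the failure some other way and then walk it back to the origin manually; `fail_payment_along_route` itself now just composes the two steps. A hedged sketch of the standalone use:

	// After the recipient has already failed the HTLC back (e.g. via a timeout
	// the test handled itself), propagate the failure to the origin node.
	pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_hash);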
+
+pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths_slice: &[&[&Node<'a, 'b, 'c>]], skip_last: bool, our_payment_hash: PaymentHash) {
+       let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect();
        check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len());
 
        let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len());
@@ -1806,7 +1900,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe
        }
 
        // Ensure that fail_htlc_backwards is idempotent.
-       assert!(!expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
+       expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash);
        assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_events().is_empty());
        assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(expected_paths[0].last().unwrap(), 0);
@@ -1821,7 +1915,7 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
        for i in 0..node_count {
                let tx_broadcaster = test_utils::TestBroadcaster {
                        txn_broadcasted: Mutex::new(Vec::new()),
-                       blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 0)])),
+                       blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 0)])),
                };
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let chain_source = test_utils::TestChainSource::new(Network::Testnet);
@@ -1895,7 +1989,7 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
        let mut nodes = Vec::new();
        let chan_count = Rc::new(RefCell::new(0));
        let payment_count = Rc::new(RefCell::new(0));
-       let connect_style = Rc::new(RefCell::new(ConnectStyle::FullBlockViaListen));
+       let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style()));
 
        for i in 0..node_count {
                let net_graph_msg_handler = NetGraphMsgHandler::new(cfgs[i].network_graph, None, cfgs[i].logger);
@@ -2106,7 +2200,7 @@ macro_rules! handle_chan_reestablish_msgs {
                {
                        let msg_events = $src_node.node.get_and_clear_pending_msg_events();
                        let mut idx = 0;
-                       let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
+                       let channel_ready = if let Some(&MessageSendEvent::SendChannelReady { ref node_id, ref msg }) = msg_events.get(0) {
                                idx += 1;
                                assert_eq!(*node_id, $dst_node.node.get_our_node_id());
                                Some(msg.clone())
@@ -2167,35 +2261,35 @@ macro_rules! handle_chan_reestablish_msgs {
 
                        assert_eq!(msg_events.len(), idx);
 
-                       (funding_locked, revoke_and_ack, commitment_update, order)
+                       (channel_ready, revoke_and_ack, commitment_update, order)
                }
        }
 }
 
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
-pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
+pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
        node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
        node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
 
-       if send_funding_locked.0 {
-               // If a expects a funding_locked, it better not think it has received a revoke_and_ack
+       if send_channel_ready.0 {
+               // If a expects a channel_ready, it better not think it has received a revoke_and_ack
                // from b
                for reestablish in reestablish_1.iter() {
                        assert_eq!(reestablish.next_remote_commitment_number, 0);
                }
        }
-       if send_funding_locked.1 {
-               // If b expects a funding_locked, it better not think it has received a revoke_and_ack
+       if send_channel_ready.1 {
+               // If b expects a channel_ready, it better not think it has received a revoke_and_ack
                // from a
                for reestablish in reestablish_2.iter() {
                        assert_eq!(reestablish.next_remote_commitment_number, 0);
                }
        }
-       if send_funding_locked.0 || send_funding_locked.1 {
-               // If we expect any funding_locked's, both sides better have set
+       if send_channel_ready.0 || send_channel_ready.1 {
+               // If we expect any channel_ready's, both sides better have set
                // next_holder_commitment_number to 1
                for reestablish in reestablish_1.iter() {
                        assert_eq!(reestablish.next_local_commitment_number, 1);
@@ -2234,8 +2328,8 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
                         pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
 
        for chan_msgs in resp_1.drain(..) {
-               if send_funding_locked.0 {
-                       node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
+               if send_channel_ready.0 {
+                       node_a.node.handle_channel_ready(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
                        let announcement_event = node_a.node.get_and_clear_pending_msg_events();
                        if !announcement_event.is_empty() {
                                assert_eq!(announcement_event.len(), 1);
@@ -2291,8 +2385,8 @@ pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a,
        }
 
        for chan_msgs in resp_2.drain(..) {
-               if send_funding_locked.1 {
-                       node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap());
+               if send_channel_ready.1 {
+                       node_b.node.handle_channel_ready(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap());
                        let announcement_event = node_b.node.get_and_clear_pending_msg_events();
                        if !announcement_event.is_empty() {
                                assert_eq!(announcement_event.len(), 1);
index 48b4b07c7d76d1f73238b27eb851ac32e720adbb..d1dbad4720b814db3fb89476e0e8e8b7cee22193 100644 (file)
@@ -1259,6 +1259,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        // Provide preimage to node 0 by claiming payment
        nodes[0].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[0], payment_hash, 800_000);
        check_added_monitors!(nodes[0], 1);
 
        // Broadcast node 1 commitment txn
@@ -1280,23 +1281,41 @@ fn test_duplicate_htlc_different_direction_onchain() {
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
-       // Check we only broadcast 1 timeout tx
        let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(claim_txn.len(), 8);
-       assert_eq!(claim_txn[1], claim_txn[4]);
-       assert_eq!(claim_txn[2], claim_txn[5]);
-       check_spends!(claim_txn[1], chan_1.3);
-       check_spends!(claim_txn[2], claim_txn[1]);
-       check_spends!(claim_txn[7], claim_txn[1]);
+
+       check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
+
+       check_spends!(claim_txn[1], chan_1.3); // Alternative commitment tx
+       check_spends!(claim_txn[2], claim_txn[1]); // HTLC spend in alternative commitment tx
+
+       let bump_tx = if claim_txn[1] == claim_txn[4] {
+               assert_eq!(claim_txn[1], claim_txn[4]);
+               assert_eq!(claim_txn[2], claim_txn[5]);
+
+               check_spends!(claim_txn[7], claim_txn[1]); // HTLC timeout on alternative commitment tx
+
+               check_spends!(claim_txn[3], remote_txn[0]); // HTLC timeout on broadcasted commitment tx
+               &claim_txn[3]
+       } else {
+               assert_eq!(claim_txn[1], claim_txn[3]);
+               assert_eq!(claim_txn[2], claim_txn[4]);
+
+               check_spends!(claim_txn[5], claim_txn[1]); // HTLC timeout on alternative commitment tx
+
+               check_spends!(claim_txn[7], remote_txn[0]); // HTLC timeout on broadcasted commitment tx
+
+               &claim_txn[7]
+       };
 
        assert_eq!(claim_txn[0].input.len(), 1);
-       assert_eq!(claim_txn[3].input.len(), 1);
-       assert_eq!(claim_txn[0].input[0].previous_output, claim_txn[3].input[0].previous_output);
+       assert_eq!(bump_tx.input.len(), 1);
+       assert_eq!(claim_txn[0].input[0].previous_output, bump_tx.input[0].previous_output);
 
        assert_eq!(claim_txn[0].input.len(), 1);
        assert_eq!(claim_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
-       check_spends!(claim_txn[0], remote_txn[0]);
        assert_eq!(remote_txn[0].output[claim_txn[0].input[0].previous_output.vout as usize].value, 800);
+
        assert_eq!(claim_txn[6].input.len(), 1);
        assert_eq!(claim_txn[6].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
        check_spends!(claim_txn[6], remote_txn[0]);
@@ -1953,9 +1972,9 @@ fn test_channel_reserve_holding_cell_htlcs() {
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(our_payment_hash_21, *payment_hash);
-                       assert_eq!(recv_value_21, amt);
+                       assert_eq!(recv_value_21, amount_msat);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -1967,9 +1986,9 @@ fn test_channel_reserve_holding_cell_htlcs() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(our_payment_hash_22, *payment_hash);
-                       assert_eq!(recv_value_22, amt);
+                       assert_eq!(recv_value_22, amount_msat);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -2031,8 +2050,9 @@ fn channel_reserve_in_flight_removes() {
 
        let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
        // Route the first two HTLCs.
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
-       let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
+       let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
+       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
 
        // Start routing the third HTLC (this is just used to get everyone in the right state).
        let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -2046,13 +2066,15 @@ fn channel_reserve_in_flight_removes() {
 
        // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
        // initial fulfill/CS.
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
        check_added_monitors!(nodes[1], 1);
        let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
        // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
        // remove the second HTLC when we send the HTLC back from B to A.
-       assert!(nodes[1].node.claim_funds(payment_preimage_2));
+       nodes[1].node.claim_funds(payment_preimage_2);
+       expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -2195,7 +2217,7 @@ fn channel_monitor_network_test() {
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 
        // One pending HTLC is discarded by the force-close:
-       let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
@@ -2217,9 +2239,10 @@ fn channel_monitor_network_test() {
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
-               ($node: expr, $prev_node: expr, $preimage: expr) => {
+               ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
                        {
-                               assert!($node.node.claim_funds($preimage));
+                               $node.node.claim_funds($preimage);
+                               expect_payment_claimed!($node, $payment_hash, 3_000_000);
                                check_added_monitors!($node, 1);
 
                                let events = $node.node.get_and_clear_pending_msg_events();
@@ -2249,7 +2272,7 @@ fn channel_monitor_network_test() {
                node2_commitment_txid = node_txn[0].txid();
 
                // Claim the payment on nodes[3], giving it knowledge of the preimage
-               claim_funds!(nodes[3], nodes[2], payment_preimage_1);
+               claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
                mine_transaction(&nodes[3], &node_txn[0]);
                check_added_monitors!(nodes[3], 1);
                check_preimage_claim(&nodes[3], &node_txn);
@@ -2265,7 +2288,7 @@ fn channel_monitor_network_test() {
        let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
 
        // One pending HTLC to time out:
-       let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
+       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
        // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
        // buffer space).
 
@@ -2300,7 +2323,7 @@ fn channel_monitor_network_test() {
                let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
 
                // Claim the payment on nodes[4], giving it knowledge of the preimage
-               claim_funds!(nodes[4], nodes[3], payment_preimage_2);
+               claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2);
 
                connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
                let events = nodes[4].node.get_and_clear_pending_msg_events();
@@ -2351,7 +2374,8 @@ fn test_justice_tx() {
        chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
        // Create some new channels:
        let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
@@ -2583,11 +2607,7 @@ fn claim_htlc_outputs_single_tx() {
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 9);
-               // ChannelMonitor: justice tx revoked offered htlc, justice tx revoked received htlc, justice tx revoked to_local (3)
-               // ChannelManager: local commmitment + local HTLC-timeout (2)
-               // ChannelMonitor: bumped justice tx, after one increase, bumps on HTLC aren't generated not being substantial anymore, bump on revoked to_local isn't generated due to more room for expiration (2)
-               // ChannelMonitor: local commitment + local HTLC-timeout (2)
+               assert!(node_txn.len() == 9 || node_txn.len() == 10);
 
                // Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
                assert_eq!(node_txn[0].input.len(), 1);
@@ -2655,8 +2675,8 @@ fn test_htlc_on_chain_success() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
-       let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+       let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
        // Broadcast legit commitment tx from C on B's chain
        // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
@@ -2664,7 +2684,9 @@ fn test_htlc_on_chain_success() {
        assert_eq!(commitment_tx.len(), 1);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.claim_funds(our_payment_preimage);
+       expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000);
        nodes[2].node.claim_funds(our_payment_preimage_2);
+       expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
        check_added_monitors!(nodes[2], 2);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3082,7 +3104,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
        let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
-       assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
+       nodes[2].node.fail_htlc_backwards(&first_payment_hash);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -3095,7 +3117,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
        // Drop the last RAA from 3 -> 2
 
-       assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
+       nodes[2].node.fail_htlc_backwards(&second_payment_hash);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -3112,7 +3134,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[2], 1);
 
-       assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
+       nodes[2].node.fail_htlc_backwards(&third_payment_hash);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -3476,9 +3498,10 @@ fn test_dup_events_on_peer_disconnect() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1000000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-       assert!(nodes[1].node.claim_funds(payment_preimage));
+       nodes[1].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
        check_added_monitors!(nodes[1], 1);
        let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
@@ -3601,18 +3624,18 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let mut as_funding_locked = None;
+       let mut as_channel_ready = None;
        if messages_delivered == 0 {
-               let (funding_locked, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
-               as_funding_locked = Some(funding_locked);
-               // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
+               let (channel_ready, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               as_channel_ready = Some(channel_ready);
+               // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
                // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
                // it before the channel_reestablish message.
        } else {
                create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        }
 
-       let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+       let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
 
        let payment_event = {
                nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
@@ -3658,17 +3681,17 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        if messages_delivered < 3 {
                if simulate_broken_lnd {
-                       // lnd has a long-standing bug where they send a funding_locked prior to a
-                       // channel_reestablish if you reconnect prior to funding_locked time.
+                       // lnd has a long-standing bug where they send a channel_ready prior to a
+                       // channel_reestablish if you reconnect before channel_ready has been exchanged.
                        //
-                       // Here we simulate that behavior, delivering a funding_locked immediately on
-                       // reconnect. Note that we don't bother skipping the now-duplicate funding_locked sent
+                       // Here we simulate that behavior, delivering a channel_ready immediately on
+                       // reconnect. Note that we don't bother skipping the now-duplicate channel_ready sent
                        // in `reconnect_nodes` but we currently don't fail based on that.
                        //
                        // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
-                       nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked.as_ref().unwrap().0);
+                       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0);
                }
-               // Even if the funding_locked messages get exchanged, as long as nothing further was
+               // Even if the channel_ready messages get exchanged, as long as nothing further was
                // received on either side, both sides will need to resend them.
                reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        } else if messages_delivered == 3 {
@@ -3701,9 +3724,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        let events_2 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_2.len(), 1);
        match events_2[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
+               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
                        assert_eq!(payment_hash_1, *payment_hash);
-                       assert_eq!(amt, 1000000);
+                       assert_eq!(amount_msat, 1_000_000);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -3717,6 +3740,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
        let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_3.len(), 1);
@@ -3862,26 +3886,26 @@ fn test_funding_peer_disconnect() {
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
-       // nodes[0] hasn't yet received a funding_locked, so it only sends that on reconnect.
+       // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_3.len(), 1);
-       let as_funding_locked = match events_3[0] {
-               MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+       let as_channel_ready = match events_3[0] {
+               MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
                        assert_eq!(*node_id, nodes[1].node.get_our_node_id());
                        msg.clone()
                },
                _ => panic!("Unexpected event {:?}", events_3[0]),
        };
 
-       // nodes[1] received nodes[0]'s funding_locked on the first reconnect above, so it should send
+       // nodes[1] received nodes[0]'s channel_ready on the first reconnect above, so it should send
        // announcement_signatures as well as channel_update.
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
        let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_4.len(), 3);
        let chan_id;
-       let bs_funding_locked = match events_4[0] {
-               MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+       let bs_channel_ready = match events_4[0] {
+               MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                        chan_id = msg.channel_id;
                        msg.clone()
@@ -3902,9 +3926,9 @@ fn test_funding_peer_disconnect() {
                _ => panic!("Unexpected event {:?}", events_4[2]),
        }
 
-       // Re-deliver nodes[0]'s funding_locked, which nodes[1] can safely ignore. It currently
+       // Re-deliver nodes[0]'s channel_ready, which nodes[1] can safely ignore. It currently
        // generates a duplicative private channel_update
-       nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
        let events_5 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
@@ -3914,9 +3938,9 @@ fn test_funding_peer_disconnect() {
                _ => panic!("Unexpected event {:?}", events_5[0]),
        };
 
-       // When we deliver nodes[1]'s funding_locked, however, nodes[0] will generate its
+       // When we deliver nodes[1]'s channel_ready, however, nodes[0] will generate its
        // announcement_signatures.
-       nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding_locked);
+       nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready);
        let events_6 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_6.len(), 1);
        let as_announcement_sigs = match events_6[0] {
@@ -4019,11 +4043,11 @@ fn test_funding_peer_disconnect() {
 }
 
 #[test]
-fn test_funding_locked_without_best_block_updated() {
+fn test_channel_ready_without_best_block_updated() {
        // Previously, if we were offline when a funding transaction was locked in, and then we came
        // back online, calling best_block_updated once followed by transactions_confirmed, we'd not
-       // generate a funding_locked until a later best_block_updated. This tests that we generate the
-       // funding_locked immediately instead.
+       // generate a channel_ready until a later best_block_updated. This tests that we generate the
+       // channel_ready immediately instead.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4039,9 +4063,9 @@ fn test_funding_locked_without_best_block_updated() {
        let conf_block_header = nodes[0].get_block_header(conf_height);
        nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
 
-       // Ensure nodes[0] generates a funding_locked after the transactions_confirmed
-       let as_funding_locked = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
-       nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
+       // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+       let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
 }
 
 #[test]
@@ -4054,7 +4078,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -4068,7 +4092,8 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
                _ => panic!("Unexpected event"),
        }
 
-       assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].node.claim_funds(payment_preimage_1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
 
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
@@ -4393,8 +4418,8 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-       let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+       let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
        for node in nodes.iter() {
                assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
                node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
@@ -4513,8 +4538,8 @@ fn test_manager_serialize_deserialize_events() {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-       let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+       let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
        for node in nodes.iter() {
                assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
                node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
@@ -4844,14 +4869,15 @@ fn test_static_spendable_outputs_preimage_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
 
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(commitment_tx[0].input.len(), 1);
        assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
 
        // Settle A's commitment tx on B's chain
-       assert!(nodes[1].node.claim_funds(payment_preimage));
+       nodes[1].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
        check_added_monitors!(nodes[1], 1);
        mine_transaction(&nodes[1], &commitment_tx[0]);
        check_added_monitors!(nodes[1], 1);
@@ -5144,10 +5170,11 @@ fn test_onchain_to_onchain_claim() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+       let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
        let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -5262,7 +5289,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
        connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
 
-       let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
+       let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
 
        let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200).unwrap();
        // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
@@ -5286,25 +5313,36 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        let htlc_timeout_tx;
        { // Extract one of the two HTLC-Timeout transactions
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               // ChannelMonitor: timeout tx * 3, ChannelManager: local commitment tx
-               assert_eq!(node_txn.len(), 4);
+               // ChannelMonitor: timeout tx * 2-or-3, ChannelManager: local commitment tx
+               assert!(node_txn.len() == 4 || node_txn.len() == 3);
                check_spends!(node_txn[0], chan_2.3);
 
                check_spends!(node_txn[1], commitment_txn[0]);
                assert_eq!(node_txn[1].input.len(), 1);
-               check_spends!(node_txn[2], commitment_txn[0]);
-               assert_eq!(node_txn[2].input.len(), 1);
-               assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
-               check_spends!(node_txn[3], commitment_txn[0]);
-               assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
+
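+               // When four transactions are present, node_txn[2] is a second spend of the same HTLC
+               // output as node_txn[1]; otherwise node_txn[2] spends the other HTLC output.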
+               if node_txn.len() > 3 {
+                       check_spends!(node_txn[2], commitment_txn[0]);
+                       assert_eq!(node_txn[2].input.len(), 1);
+                       assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
+
+                       check_spends!(node_txn[3], commitment_txn[0]);
+                       assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
+               } else {
+                       check_spends!(node_txn[2], commitment_txn[0]);
+                       assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
+               }
 
                assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
                assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-               assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+               if node_txn.len() > 3 {
+                       assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+               }
                htlc_timeout_tx = node_txn[1].clone();
        }
 
        nodes[2].node.claim_funds(our_payment_preimage);
+       expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
+
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
@@ -5385,7 +5423,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
        let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
        assert_eq!(local_txn.len(), 1);
        assert_eq!(local_txn[0].input.len(), 1);
@@ -5393,7 +5431,9 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 
        // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx
        nodes[1].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
        check_added_monitors!(nodes[1], 1);
+
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
@@ -5502,10 +5542,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
        // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
-       assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1));
-       assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3));
-       assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
-       assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
+       nodes[4].node.fail_htlc_backwards(&payment_hash_1);
+       nodes[4].node.fail_htlc_backwards(&payment_hash_3);
+       nodes[4].node.fail_htlc_backwards(&payment_hash_5);
+       nodes[4].node.fail_htlc_backwards(&payment_hash_6);
        check_added_monitors!(nodes[4], 0);
        expect_pending_htlcs_forwardable!(nodes[4]);
        check_added_monitors!(nodes[4], 1);
@@ -5518,8 +5558,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
 
        // Fail 3rd below-dust and 7th above-dust HTLCs
-       assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
-       assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
+       nodes[5].node.fail_htlc_backwards(&payment_hash_2);
+       nodes[5].node.fail_htlc_backwards(&payment_hash_4);
        check_added_monitors!(nodes[5], 0);
        expect_pending_htlcs_forwardable!(nodes[5]);
        check_added_monitors!(nodes[5], 1);
@@ -5868,12 +5908,13 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
 
        // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
        // present in B's local commitment transaction, but none of A's commitment transactions.
-       assert!(nodes[1].node.claim_funds(payment_preimage));
+       nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 });
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
@@ -5943,7 +5984,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        // actually revoked.
        let htlc_value = if use_dust { 50000 } else { 3000000 };
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
-       assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
+       nodes[1].node.fail_htlc_backwards(&our_payment_hash);
        expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
 
@@ -6307,6 +6348,8 @@ fn test_free_and_fail_holding_cell_htlcs() {
        }
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
+
        let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
@@ -6895,10 +6938,11 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
+       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
 
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -6937,10 +6981,11 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
+       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], our_payment_hash, 100_000);
 
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -7115,7 +7160,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
 
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
-       assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
+       nodes[1].node.fail_htlc_backwards(&payment_hash_2);
        check_added_monitors!(nodes[1], 0);
        expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
@@ -7927,7 +7972,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
-       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
 
        // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
@@ -7938,6 +7983,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 
        // Claim a HTLC without revocation (provide B monitor with preimage)
        nodes[1].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
        mine_transaction(&nodes[1], &remote_txn[0]);
        check_added_monitors!(nodes[1], 2);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
@@ -7960,13 +8006,24 @@ fn test_bump_penalty_txn_on_remote_commitment() {
                assert_eq!(node_txn[6].input.len(), 1);
                check_spends!(node_txn[0], remote_txn[0]);
                check_spends!(node_txn[6], remote_txn[0]);
-               assert_eq!(node_txn[0].input[0].previous_output, node_txn[3].input[0].previous_output);
-               preimage_bump = node_txn[3].clone();
 
                check_spends!(node_txn[1], chan.3);
                check_spends!(node_txn[2], node_txn[1]);
-               assert_eq!(node_txn[1], node_txn[4]);
-               assert_eq!(node_txn[2], node_txn[5]);
+
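+               // The bumped preimage-claim transaction may appear at index 3 or at index 7, so handle
+               // both possible orderings of the broadcast transactions.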
+               if node_txn[0].input[0].previous_output == node_txn[3].input[0].previous_output {
+                       preimage_bump = node_txn[3].clone();
+                       check_spends!(node_txn[3], remote_txn[0]);
+
+                       assert_eq!(node_txn[1], node_txn[4]);
+                       assert_eq!(node_txn[2], node_txn[5]);
+               } else {
+                       preimage_bump = node_txn[7].clone();
+                       check_spends!(node_txn[7], remote_txn[0]);
+                       assert_eq!(node_txn[0].input[0].previous_output, node_txn[7].input[0].previous_output);
+
+                       assert_eq!(node_txn[1], node_txn[3]);
+                       assert_eq!(node_txn[2], node_txn[4]);
+               }
 
                timeout = node_txn[6].txid();
                let index = node_txn[6].input[0].previous_output.vout;
@@ -8113,8 +8170,9 @@ fn test_pending_claimed_htlc_no_balance_underflow() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 
-       let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_010_000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_010_000);
        nodes[1].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, 1_010_000);
        check_added_monitors!(nodes[1], 1);
        let fulfill_ev = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
@@ -8696,7 +8754,7 @@ fn test_update_err_monitor_lockdown() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
 
        // Route a HTLC from node 0 to node 1 (but don't settle)
-       let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
+       let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
 
        // Copy ChainMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain
        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
@@ -8714,14 +8772,17 @@ fn test_update_err_monitor_lockdown() {
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
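+       // Build an empty block, reused below both for the broadcaster's chain view and for connecting
+       // a block to the watchtower.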
+       let block = Block { header, txdata: vec![] };
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
-       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (header, 0));
-       watchtower.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
+       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 0));
+       watchtower.chain_monitor.block_connected(&block, 200);
 
        // Try to update ChannelMonitor
-       assert!(nodes[1].node.claim_funds(preimage));
+       nodes[1].node.claim_funds(preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
+
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
@@ -8775,10 +8836,11 @@ fn test_concurrent_monitor_claim() {
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let block = Block { header, txdata: vec![] };
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
-       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (header, 0));
-       watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (block.clone(), 0));
+       watchtower_alice.chain_monitor.block_connected(&block, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
        // Watchtower Alice should have broadcast a commitment/HTLC-timeout
        {
@@ -8964,7 +9026,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 
        // Steps (1) and (2):
        // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
-       let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3_000_000);
+       let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
        // Check that Alice's commitment transaction now contains an output for this HTLC.
        let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
@@ -9009,8 +9071,10 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        // Step (5):
        // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
        // process of removing the HTLC from their commitment transactions.
-       assert!(nodes[2].node.claim_funds(payment_preimage));
+       nodes[2].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+
        let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(carol_updates.update_add_htlcs.is_empty());
        assert!(carol_updates.update_fail_htlcs.is_empty());
@@ -9222,7 +9286,11 @@ fn test_duplicate_chan_id() {
 
        let funding_created = {
                let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
-               let mut as_chan = a_channel_lock.by_id.get_mut(&open_chan_2_msg.temporary_channel_id).unwrap();
+               // Once we call `get_outbound_funding_created` the channel has the same channel_id as
+               // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
+               // try to create another channel. Instead, we drop the channel entirely here, leaving the
+               // ChannelManager in a possibly-nonsense state.
+               let mut as_chan = a_channel_lock.by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
                as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
        };
@@ -9260,10 +9328,10 @@ fn test_duplicate_chan_id() {
        let events_4 = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events_4.len(), 0);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
-       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
 
-       let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+       let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
        update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
 }
@@ -9563,7 +9631,7 @@ fn test_forwardable_regen() {
        check_added_monitors!(nodes[1], 2);
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-       // Note that nodes[1] and nodes[2] resend their funding_locked here since they haven't updated
+       // Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
        // the commitment state.
        reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -9843,6 +9911,249 @@ fn test_keysend_payments_to_private_node() {
        claim_payment(&nodes[0], &path, test_preimage);
 }
 
+#[test]
+fn test_double_partial_claim() {
+       // Test what happens if a node receives a payment, generates a PaymentReceived event, the HTLCs
+       // time out, and the sender then resends only some of the MPP parts. When the user finally
+       // processes the stale PaymentReceived event, the node must not inadvertently claim only part of
+       // the full payment amount.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+       assert_eq!(route.paths.len(), 2);
+       route.paths.sort_by(|path_a, _| {
+               // Sort the path so that the path through nodes[1] comes first
+               if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+                       core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+       });
+
+       send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
+       // nodes[3] has now received a PaymentReceived event...which it will take some (exorbitant)
+       // amount of time to respond to.
+
+       // Connect some blocks to time out the payment
+       connect_blocks(&nodes[3], TEST_FINAL_CLTV);
+       connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
+
+       expect_pending_htlcs_forwardable!(nodes[3]);
+
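+       // The timed-out HTLCs are failed back along both paths, and nodes[0] sees the payment fail.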
+       pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
+
+       // nodes[0] now retries one of the two paths...
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 2);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+       // At this point nodes[3] has received one half of the payment, and the user goes to handle
+       // that PaymentReceived event they got hours ago and never handled...we should refuse to claim.
+       nodes[3].node.claim_funds(payment_preimage);
+       check_added_monitors!(nodes[3], 0);
+       assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+}
+
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+       // Test what happens if a node receives an MPP payment, claims it, but crashes before
+       // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
+       // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
+       // have the PaymentReceived event, (b) have one (or two) channel(s) that go on chain with the
+       // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
+       // not have the preimage tied to the still-pending HTLC.
+       //
+       // To get to the correct state, on startup we should propagate the preimage to the
+       // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
+       // receiving the preimage without a state update.
+       //
+       // Further, we should generate a `PaymentClaimed` event to inform the user that the payment was
+       // definitely claimed.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+
+       let persister: test_utils::TestPersister;
+       let new_chain_monitor: test_utils::TestChainMonitor;
+       let nodes_3_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+       let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+       let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+
+       // Create an MPP route for 15k sats, more than the default htlc-max of 10%
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+       assert_eq!(route.paths.len(), 2);
+       route.paths.sort_by(|path_a, _| {
+               // Sort the path so that the path through nodes[1] comes first
+               if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+                       core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+       });
+
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 2);
+
+       // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+       let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(send_events.len(), 2);
+       do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
+       do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+
+       // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
+       // monitors and ChannelManager, for use later, if we don't want to persist both monitors.
+       let mut original_monitor = test_utils::TestVecWriter(Vec::new());
+       if !persist_both_monitors {
+               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if outpoint.to_channel_id() == chan_id_not_persisted {
+                               assert!(original_monitor.0.is_empty());
+                               nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+                       }
+               }
+       }
+
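+       // Also serialize nodes[3]'s ChannelManager now, before the claim, so the restart below uses a
+       // ChannelManager state which predates the claim.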
+       let mut original_manager = test_utils::TestVecWriter(Vec::new());
+       nodes[3].node.write(&mut original_manager).unwrap();
+
+       expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+
+       nodes[3].node.claim_funds(payment_preimage);
+       check_added_monitors!(nodes[3], 2);
+       expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
+
+       // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
+       // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
+       // with the old ChannelManager.
+       let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
+       for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+               if outpoint.to_channel_id() == chan_id_persisted {
+                       assert!(updated_monitor.0.is_empty());
+                       nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
+               }
+       }
+       // If `persist_both_monitors` is set, get the second monitor here as well
+       if persist_both_monitors {
+               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if outpoint.to_channel_id() == chan_id_not_persisted {
+                               assert!(original_monitor.0.is_empty());
+                               nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+                       }
+               }
+       }
+
+       // Now restart nodes[3].
+       persister = test_utils::TestPersister::new();
+       let keys_manager = &chanmon_cfgs[3].keys_manager;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager);
+       nodes[3].chain_monitor = &new_chain_monitor;
+       let mut monitors = Vec::new();
+       for mut monitor_data in [original_monitor, updated_monitor].iter() {
+               let (_, mut deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut &monitor_data.0[..], keys_manager).unwrap();
+               monitors.push(deserialized_monitor);
+       }
+
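+       // Reload the ChannelManager from the pre-claim serialization, handing it the freshly
+       // deserialized monitors (at least one of which has already seen the claim).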
+       let config = UserConfig::default();
+       nodes_3_deserialized = {
+               let mut channel_monitors = HashMap::new();
+               for monitor in monitors.iter_mut() {
+                       channel_monitors.insert(monitor.get_funding_txo().0, monitor);
+               }
+               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs {
+                       default_config: config,
+                       keys_manager,
+                       fee_estimator: node_cfgs[3].fee_estimator,
+                       chain_monitor: nodes[3].chain_monitor,
+                       tx_broadcaster: nodes[3].tx_broadcaster.clone(),
+                       logger: nodes[3].logger,
+                       channel_monitors,
+               }).unwrap().1
+       };
+       nodes[3].node = &nodes_3_deserialized;
+
+       for monitor in monitors {
+               // On startup the preimage should have been copied into the non-persisted monitor:
+               assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
+               nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+       }
+       check_added_monitors!(nodes[3], 2);
+
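+       // nodes[1] and nodes[2] see nodes[3] as disconnected across its restart.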
+       nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+
+       // During deserialization, we should have closed one channel and broadcast its latest
+       // commitment transaction. We should also still have the original PaymentReceived event we
+       // never finished processing.
+       let events = nodes[3].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
+       if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
+       if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
+       if persist_both_monitors {
+               if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+       }
+
+       // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
+       // ChannelManager prior to handling the original one.
+       if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } =
+               events[if persist_both_monitors { 3 } else { 2 }]
+       {
+               assert_eq!(payment_hash, our_payment_hash);
+       } else { panic!(); }
+
+       assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
+       if !persist_both_monitors {
+               // If one of the two channels is still live, reveal the payment preimage over it.
+
+               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
+               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
+
+               nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
+               get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+               assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+               nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
+
+               // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
+               // claim should fly.
+               let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+               check_added_monitors!(nodes[3], 1);
+               assert_eq!(ds_msgs.len(), 2);
+               if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+
+               let cs_updates = match ds_msgs[0] {
+                       MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+                               nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+                               check_added_monitors!(nodes[2], 1);
+                               let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+                               expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+                               commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+                               cs_updates
+                       }
+                       _ => panic!(),
+               };
+
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
+               expect_payment_sent!(nodes[0], payment_preimage);
+       }
+}
+
+#[test]
+fn test_partial_claim_before_restart() {
+       do_test_partial_claim_before_restart(false);
+       do_test_partial_claim_before_restart(true);
+}
+
 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
 #[derive(Clone, Copy, PartialEq)]
 enum ExposureEvent {
@@ -9902,8 +10213,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
        check_added_monitors!(nodes[0], 1);
 
-       let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
-       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+       let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+       let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
        update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
 
        let dust_buffer_feerate = {
index af42644dd710164a8c1a633ef54975aa2a238873..492a65537701750f7a97c56ac6cb287da395aac0 100644 (file)
@@ -20,6 +20,7 @@ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureRe
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
 use bitcoin::secp256k1::Secp256k1;
+use bitcoin::Transaction;
 
 use prelude::*;
 
@@ -82,6 +83,17 @@ fn chanmon_fail_from_stale_commitment() {
        expect_payment_failed_with_update!(nodes[0], payment_hash, false, update_a.contents.short_channel_id, true);
 }
 
+fn test_spendable_output<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_tx: &Transaction) {
+       let mut spendable = node.chain_monitor.chain_monitor.get_and_clear_pending_events();
+       assert_eq!(spendable.len(), 1);
+       if let Event::SpendableOutputs { outputs } = spendable.pop().unwrap() {
+               assert_eq!(outputs.len(), 1);
+               let spend_tx = node.keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+               check_spends!(spend_tx, spendable_tx);
+       } else { panic!(); }
+}
+
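
The new test_spendable_output helper centralizes the SpendableOutputs checks that the hunks below previously repeated inline. As a minimal sketch, assuming an application-side KeysManager, destination script and feerate (illustrative names, not part of this patch), the same sweep pattern outside the test harness looks roughly like this:

        // Sketch only: sweep the descriptors from a SpendableOutputs event via
        // KeysManager::spend_spendable_outputs, mirroring the helper above. The
        // `keys_manager`, `destination_script` and `feerate_sat_per_1000_weight`
        // values are assumed to be supplied by the application.
        use bitcoin::Script;
        use bitcoin::secp256k1::Secp256k1;
        use lightning::chain::keysinterface::KeysManager;
        use lightning::util::events::Event;

        fn sweep_spendable_outputs(keys_manager: &KeysManager, destination_script: Script,
                feerate_sat_per_1000_weight: u32, event: Event) -> Option<bitcoin::Transaction> {
                if let Event::SpendableOutputs { outputs } = event {
                        // Collect references, as spend_spendable_outputs takes &[&SpendableOutputDescriptor].
                        let descriptors: Vec<_> = outputs.iter().collect();
                        let spend_tx = keys_manager.spend_spendable_outputs(&descriptors, Vec::new(),
                                destination_script, feerate_sat_per_1000_weight, &Secp256k1::new()).ok()?;
                        // The caller would hand spend_tx to its transaction broadcaster.
                        Some(spend_tx)
                } else { None }
        }

The tests pass a feerate of 253 sat per 1000 weight and an OP_RETURN script only because check_spends! just needs a well-formed spend; a real application would pass its own destination script.
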
 #[test]
 fn chanmon_claim_value_coop_close() {
        // Tests `get_claimable_balances` returns the correct values across a simple cooperative claim.
@@ -155,23 +167,9 @@ fn chanmon_claim_value_coop_close() {
        assert_eq!(Vec::<Balance>::new(),
                nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
 
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, shutdown_tx[0]);
-       }
+       test_spendable_output(&nodes[0], &shutdown_tx[0]);
+       test_spendable_output(&nodes[1], &shutdown_tx[0]);
 
-       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_b_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, shutdown_tx[0]);
-       }
        check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
@@ -203,7 +201,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 
        // This HTLC is immediately claimed, giving node B the preimage
-       let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
        // This HTLC is allowed to time out, letting A claim it. However, in order to test claimable
        // balances more fully we also give B the preimage for this HTLC.
        let (timeout_payment_preimage, timeout_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 4_000_000);
@@ -236,13 +234,18 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
 
        nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
+
        let b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
        // We claim the dust payment here as well, but it won't impact our claimable balances as it's
        // dust and thus doesn't appear on chain at all.
        nodes[1].node.claim_funds(dust_payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], dust_payment_hash, 3_000);
+
        nodes[1].node.claim_funds(timeout_payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], timeout_payment_hash, 4_000_000);
 
        if prev_commitment_tx {
                // To build a previous commitment transaction, deliver one round of commitment messages.
@@ -384,15 +387,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                }]),
                sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
 
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, remote_txn[0]);
-       }
-
+       test_spendable_output(&nodes[0], &remote_txn[0]);
        assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
 
        // After broadcasting the HTLC claim transaction, node A will still consider the HTLC
@@ -449,14 +444,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
        expect_payment_failed!(nodes[0], timeout_payment_hash, true);
 
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, a_broadcast_txn[2]);
-       } else { panic!(); }
+       test_spendable_output(&nodes[0], &a_broadcast_txn[2]);
 
        // Node B will no longer consider the HTLC "contentious" after the HTLC claim transaction
        // confirms, and consider it simply "awaiting confirmations". Note that it has to wait for the
@@ -479,15 +467,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // After reaching the commitment output CSV, we'll get a SpendableOutputs event for it and have
        // only the HTLCs claimable on node B.
        connect_blocks(&nodes[1], node_b_commitment_claimable - nodes[1].best_block_info().1);
-
-       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_b_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, remote_txn[0]);
-       }
+       test_spendable_output(&nodes[1], &remote_txn[0]);
 
        assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
                        claimable_amount_satoshis: 3_000,
@@ -501,15 +481,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // After reaching the claimed HTLC output CSV, we'll get a SpendableOutputs event for it and
        // have only one HTLC output left spendable.
        connect_blocks(&nodes[1], node_b_htlc_claimable - nodes[1].best_block_info().1);
-
-       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_b_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, b_broadcast_txn[0]);
-       } else { panic!(); }
+       test_spendable_output(&nodes[1], &b_broadcast_txn[0]);
 
        assert_eq!(vec![Balance::ContentiousClaimable {
                        claimable_amount_satoshis: 4_000,
@@ -580,9 +552,10 @@ fn test_balances_on_local_commitment_htlcs() {
 
        expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000);
-       assert!(nodes[1].node.claim_funds(payment_preimage_2));
+       nodes[1].node.claim_funds(payment_preimage_2);
        get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000);
 
        let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
        let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
@@ -704,25 +677,11 @@ fn test_balances_on_local_commitment_htlcs() {
                        confirmation_height: node_a_htlc_claimable,
                }],
                nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, as_txn[0]);
-       }
+       test_spendable_output(&nodes[0], &as_txn[0]);
 
        // Connect blocks until the HTLC-Timeout's CSV expires, providing us the relevant
        // `SpendableOutputs` event and removing the claimable balance entry.
        connect_blocks(&nodes[0], node_a_htlc_claimable - nodes[0].best_block_info().1);
        assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, as_txn[1]);
-       }
+       test_spendable_output(&nodes[0], &as_txn[1]);
 }
index 5aa48c32bf07686aba4adad53b9615a6cdd91bb1..0e5b2e07e7a4cea649bc44939a67f2c940f71779 100644 (file)
@@ -239,9 +239,9 @@ pub struct FundingSigned {
        pub signature: Signature,
 }
 
-/// A funding_locked message to be sent or received from a peer
+/// A channel_ready message to be sent or received from a peer
 #[derive(Clone, Debug, PartialEq)]
-pub struct FundingLocked {
+pub struct ChannelReady {
        /// The channel ID
        pub channel_id: [u8; 32],
        /// The per-commitment point of the second commitment transaction
@@ -815,8 +815,8 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
        fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated);
        /// Handle an incoming funding_signed message from the given peer.
        fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned);
-       /// Handle an incoming funding_locked message from the given peer.
-       fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked);
+       /// Handle an incoming channel_ready message from the given peer.
+       fn handle_channel_ready(&self, their_node_id: &PublicKey, msg: &ChannelReady);
 
        // Channel close:
        /// Handle an incoming shutdown message from the given peer.
@@ -1163,7 +1163,7 @@ impl_writeable_msg!(FundingSigned, {
        signature
 }, {});
 
-impl_writeable_msg!(FundingLocked, {
+impl_writeable_msg!(ChannelReady, {
        channel_id,
        next_per_commitment_point,
 }, {
@@ -2262,15 +2262,15 @@ mod tests {
        }
 
        #[test]
-       fn encoding_funding_locked() {
+       fn encoding_channel_ready() {
                let secp_ctx = Secp256k1::new();
                let (_, pubkey_1,) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
-               let funding_locked = msgs::FundingLocked {
+               let channel_ready = msgs::ChannelReady {
                        channel_id: [2; 32],
                        next_per_commitment_point: pubkey_1,
                        short_channel_id_alias: None,
                };
-               let encoded_value = funding_locked.encode();
+               let encoded_value = channel_ready.encode();
                let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap();
                assert_eq!(encoded_value, target_value);
        }
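
Since the expected bytes in the encoding test are unchanged from the old funding_locked test, the rename is purely at the Rust naming level; the wire encoding, including the optional short_channel_id_alias extension, stays the same. A minimal sketch, assuming the public lightning crate paths (not code from this patch), of round-tripping a ChannelReady that carries an alias:

        // Sketch only: encode a ChannelReady with an SCID alias set and read it back.
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
        use lightning::ln::msgs::ChannelReady;
        use lightning::util::ser::{Readable, Writeable};

        fn channel_ready_roundtrip() {
                let secp_ctx = Secp256k1::new();
                let per_commitment_point =
                        PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let msg = ChannelReady {
                        channel_id: [2; 32],
                        next_per_commitment_point: per_commitment_point,
                        short_channel_id_alias: Some(0xdead_beef),
                };
                let encoded = msg.encode();
                let decoded: ChannelReady = Readable::read(&mut &encoded[..]).unwrap();
                assert_eq!(msg, decoded);
        }
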
index 9a07603fafe7e89e90b71a0d943bba59c483a223..802cd3acab45bd4592a7f80cdb982cfadeb7ed52 100644 (file)
@@ -1155,7 +1155,7 @@ fn test_phantom_failure_reject_payment() {
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
        expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat);
-       assert!(nodes[1].node.fail_htlc_backwards(&payment_hash));
+       nodes[1].node.fail_htlc_backwards(&payment_hash);
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
 
index 2d57950029e319fcabfb9532b87cda421b283dcf..07e531c5b4b7e330b252414a89207ea70e698bc2 100644 (file)
@@ -367,7 +367,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
        let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
 
        // Serialize the ChannelManager prior to sending payments
@@ -376,7 +376,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
        // out and retry.
        let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
-       let (payment_preimage_1, _, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
        let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
        check_added_monitors!(nodes[0], 1);
 
@@ -475,6 +475,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // we close in a moment.
        nodes[2].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
+
        let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
        check_added_monitors!(nodes[1], 1);
@@ -482,7 +484,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 
        if confirm_before_reload {
                let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
-               nodes[0].node.best_block_updated(&best_block.0, best_block.1);
+               nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
        }
 
        // Create a new channel on which to retry the payment before we fail the payment via the
@@ -504,14 +506,19 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        expect_payment_sent!(nodes[0], payment_preimage_1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
        let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       check_spends!(as_htlc_timeout_txn[2], funding_tx);
-       check_spends!(as_htlc_timeout_txn[0], as_commitment_tx);
-       check_spends!(as_htlc_timeout_txn[1], as_commitment_tx);
        assert_eq!(as_htlc_timeout_txn.len(), 3);
-       if as_htlc_timeout_txn[0].input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
-               confirm_transaction(&nodes[0], &as_htlc_timeout_txn[1]);
+       let (first_htlc_timeout_tx, second_htlc_timeout_tx) = if as_htlc_timeout_txn[0] == as_commitment_tx {
+               (&as_htlc_timeout_txn[1], &as_htlc_timeout_txn[2])
+       } else {
+               assert_eq!(as_htlc_timeout_txn[2], as_commitment_tx);
+               (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1])
+       };
+       check_spends!(first_htlc_timeout_tx, as_commitment_tx);
+       check_spends!(second_htlc_timeout_tx, as_commitment_tx);
+       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+               confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
        } else {
-               confirm_transaction(&nodes[0], &as_htlc_timeout_txn[0]);
+               confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
        }
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
        expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
@@ -564,7 +571,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -582,8 +589,9 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_spends!(node_txn[2], node_txn[1]);
        let timeout_txn = vec![node_txn[2].clone()];
 
-       assert!(nodes[1].node.claim_funds(payment_preimage));
+       nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
@@ -627,7 +635,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
        let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
                .get_mut(&funding_txo).unwrap().drain().collect();
-       assert_eq!(mon_updates.len(), 1);
+       // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice
+       assert!(mon_updates.len() == 1 || mon_updates.len() == 2);
        assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 
@@ -643,7 +652,9 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        chanmon_cfgs[0].persister.set_update_ret(Ok(()));
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-       nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
+       for update in mon_updates {
+               nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
+       }
        if payment_timeout {
                expect_payment_failed!(nodes[0], payment_hash, true);
        } else {
@@ -740,6 +751,8 @@ fn test_fulfill_restart_failure() {
 
        nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, 100_000);
+
        let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
        expect_payment_sent_without_paths!(nodes[0], payment_preimage);
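
The hunks in this file also move from asserting on claim_funds' old boolean return to the new expect_payment_claimed! check, reflecting that a successful claim is now reported through the event queue. A minimal sketch, assuming an application-level event handler (illustrative names, not part of this patch), of observing the corresponding event:

        // Sketch only: after ChannelManager::claim_funds(payment_preimage), the claim is
        // surfaced asynchronously as Event::PaymentClaimed rather than via a return value.
        use lightning::util::events::Event;

        fn note_claimed_payment(event: &Event) {
                if let Event::PaymentClaimed { payment_hash, amount_msat, .. } = event {
                        println!("claimed {} msat for payment hash {:?}", amount_msat, payment_hash);
                }
        }
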
index f128f6c5259802466cf99f27496b554d7b88f9ea..68a7ef952702b31436ea18ff27bb83f3b67b3fe3 100644 (file)
@@ -151,7 +151,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
                ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
        }
-       fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+       fn handle_channel_ready(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReady) {
                ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
        }
        fn handle_shutdown(&self, their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) {
@@ -1209,8 +1209,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        wire::Message::FundingSigned(msg) => {
                                self.message_handler.chan_handler.handle_funding_signed(&their_node_id, &msg);
                        },
-                       wire::Message::FundingLocked(msg) => {
-                               self.message_handler.chan_handler.handle_funding_locked(&their_node_id, &msg);
+                       wire::Message::ChannelReady(msg) => {
+                               self.message_handler.chan_handler.handle_channel_ready(&their_node_id, &msg);
                        },
 
                        wire::Message::Shutdown(msg) => {
@@ -1486,8 +1486,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                                log_bytes!(msg.channel_id));
                                                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
-                                       MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
+                                       MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+                                               log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
                                                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
index 47e2fb33e3c7321103350865f97b5f5cc5897d14..754e20a4ff11ba0c68a891059a683705892849ff 100644 (file)
@@ -11,7 +11,7 @@
 //! other behavior that exists only on private channels or with a semi-trusted counterparty (eg
 //! LSP).
 
-use chain::Watch;
+use chain::{ChannelMonitorUpdateErr, Watch};
 use chain::channelmonitor::ChannelMonitor;
 use chain::keysinterface::{Recipient, KeysInterface};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
@@ -22,7 +22,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate};
 use ln::wire::Encode;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
 use util::config::UserConfig;
 use util::ser::{Writeable, ReadableArgs};
 use util::test_utils;
@@ -167,7 +167,7 @@ fn test_priv_forwarding_rejection() {
 }
 
 fn do_test_1_conf_open(connect_style: ConnectStyle) {
-       // Previously, if the minium_depth config was set to 1, we'd never send a funding_locked. This
+       // Previously, if the minimum_depth config was set to 1, we'd never send a channel_ready. This
        // tests that we properly send one in that case.
        let mut alice_config = UserConfig::default();
        alice_config.own_channel_config.minimum_depth = 1;
@@ -185,13 +185,13 @@ fn do_test_1_conf_open(connect_style: ConnectStyle) {
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
        mine_transaction(&nodes[1], &tx);
-       nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
+       nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        mine_transaction(&nodes[0], &tx);
        let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(as_msg_events.len(), 2);
-       let as_funding_locked = if let MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = as_msg_events[0] {
+       let as_channel_ready = if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = as_msg_events[0] {
                assert_eq!(*node_id, nodes[1].node.get_our_node_id());
                msg.clone()
        } else { panic!("Unexpected event"); };
@@ -199,7 +199,7 @@ fn do_test_1_conf_open(connect_style: ConnectStyle) {
                assert_eq!(*node_id, nodes[1].node.get_our_node_id());
        } else { panic!("Unexpected event"); }
 
-       nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
        let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(bs_msg_events.len(), 1);
        if let MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } = bs_msg_events[0] {
@@ -259,7 +259,7 @@ fn test_routed_scid_alias() {
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).2;
-       let mut as_funding_locked = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).0;
+       let mut as_channel_ready = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known()).0;
 
        let last_hop = nodes[2].node.list_usable_channels();
        let hop_hints = vec![RouteHint(vec![RouteHintHop {
@@ -284,17 +284,17 @@ fn test_routed_scid_alias() {
        pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
-       // Now test that if a peer sends us a second funding_locked after the channel is operational we
+       // Now test that if a peer sends us a second channel_ready after the channel is operational we
        // will use the new alias.
-       as_funding_locked.short_channel_id_alias = Some(0xdeadbeef);
-       nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
-       // Note that we always respond to a funding_locked with a channel_update. Not a lot of reason
+       as_channel_ready.short_channel_id_alias = Some(0xdeadbeef);
+       nodes[2].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &as_channel_ready);
+       // Note that we always respond to a channel_ready with a channel_update. Not a lot of reason
        // to bother updating that code, so just drop the message here.
        get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        let updated_channel_info = nodes[2].node.list_usable_channels();
        assert_eq!(updated_channel_info.len(), 1);
        assert_eq!(updated_channel_info[0].inbound_scid_alias.unwrap(), 0xdeadbeef);
-       // Note that because we never send a duplicate funding_locked we can't send a payment through
+       // Note that because we never send a duplicate channel_ready we can't send a payment through
        // the 0xdeadbeef SCID alias.
 }
 
@@ -403,10 +403,10 @@ fn test_inbound_scid_privacy() {
        connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
        confirm_transaction_at(&nodes[2], &tx, conf_height);
        connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
-       let bs_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
-       nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+       let bs_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[2].node.get_our_node_id());
+       nodes[1].node.handle_channel_ready(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
        let bs_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
-       nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding_locked);
+       nodes[2].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready);
        let cs_update = get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &cs_update);
@@ -564,3 +564,361 @@ fn test_scid_alias_returned() {
                PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap())
                        .blamed_chan_closed(false).expected_htlc_error_data(0x1000|12, &err_data));
 }
+
+// Receiver must have been initialized with manually_accept_inbound_channels set to true.
+fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>) -> bitcoin::Transaction {
+       initiator.node.create_channel(receiver.node.get_our_node_id(), 100_000, 10_001, 42, initiator_config).unwrap();
+       let open_channel = get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver.node.get_our_node_id());
+
+       receiver.node.handle_open_channel(&initiator.node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let events = receiver.node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       receiver.node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &initiator.node.get_our_node_id(), 0).unwrap();
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let mut accept_channel = get_event_msg!(receiver, MessageSendEvent::SendAcceptChannel, initiator.node.get_our_node_id());
+       assert_eq!(accept_channel.minimum_depth, 0);
+       initiator.node.handle_accept_channel(&receiver.node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+       let (temporary_channel_id, tx, _) = create_funding_transaction(&initiator, &receiver.node.get_our_node_id(), 100_000, 42);
+       initiator.node.funding_transaction_generated(&temporary_channel_id, &receiver.node.get_our_node_id(), tx.clone()).unwrap();
+       let funding_created = get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver.node.get_our_node_id());
+
+       receiver.node.handle_funding_created(&initiator.node.get_our_node_id(), &funding_created);
+       check_added_monitors!(receiver, 1);
+       let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events();
+       assert_eq!(bs_signed_locked.len(), 2);
+       let as_channel_ready;
+       match &bs_signed_locked[0] {
+               MessageSendEvent::SendFundingSigned { node_id, msg } => {
+                       assert_eq!(*node_id, initiator.node.get_our_node_id());
+                       initiator.node.handle_funding_signed(&receiver.node.get_our_node_id(), &msg);
+                       check_added_monitors!(initiator, 1);
+
+                       assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+                       assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], tx);
+
+                       as_channel_ready = get_event_msg!(initiator, MessageSendEvent::SendChannelReady, receiver.node.get_our_node_id());
+               }
+               _ => panic!("Unexpected event"),
+       }
+       match &bs_signed_locked[1] {
+               MessageSendEvent::SendChannelReady { node_id, msg } => {
+                       assert_eq!(*node_id, initiator.node.get_our_node_id());
+                       initiator.node.handle_channel_ready(&receiver.node.get_our_node_id(), &msg);
+               }
+               _ => panic!("Unexpected event"),
+       }
+
+       receiver.node.handle_channel_ready(&initiator.node.get_our_node_id(), &as_channel_ready);
+
+       let as_channel_update = get_event_msg!(initiator, MessageSendEvent::SendChannelUpdate, receiver.node.get_our_node_id());
+       let bs_channel_update = get_event_msg!(receiver, MessageSendEvent::SendChannelUpdate, initiator.node.get_our_node_id());
+
+       initiator.node.handle_channel_update(&receiver.node.get_our_node_id(), &bs_channel_update);
+       receiver.node.handle_channel_update(&initiator.node.get_our_node_id(), &as_channel_update);
+
+       assert_eq!(initiator.node.list_usable_channels().len(), 1);
+       assert_eq!(receiver.node.list_usable_channels().len(), 1);
+
+       tx
+}
+
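
open_zero_conf_channel walks the whole 0-conf handshake; the pivotal step is answering the OpenChannelRequest event with accept_inbound_channel_from_trusted_peer_0conf, which should only ever be done for a peer trusted not to double-spend the funding transaction. A minimal test-harness-style sketch of just that acceptance step (the helper name is illustrative; the final 0 is the same application-chosen id the tests pass):

        // Sketch only: accept a pending inbound channel at zero confirmations from a
        // trusted peer, mirroring the acceptance step inside the helper above.
        fn accept_inbound_0conf<'a, 'b, 'c, 'd>(receiver: &'a Node<'b, 'c, 'd>, counterparty: &PublicKey) {
                let events = receiver.node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
                match events[0] {
                        Event::OpenChannelRequest { temporary_channel_id, .. } => {
                                receiver.node.accept_inbound_channel_from_trusted_peer_0conf(
                                        &temporary_channel_id, counterparty, 0).unwrap();
                        },
                        _ => panic!("Unexpected event"),
                };
        }
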
+#[test]
+fn test_simple_0conf_channel() {
+       // If our peer tells us they will accept our channel with 0 confs, and we funded the channel,
+       // we should trust the funding won't be double-spent (assuming `trust_own_funding_0conf` is
+       // set)!
+       // Further, if we `accept_inbound_channel_from_trusted_peer_0conf`, `channel_ready` messages
+       // should fly immediately and the channel should be available for use as soon as they are
+       // received.
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       open_zero_conf_channel(&nodes[0], &nodes[1], None);
+
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+}
+
+#[test]
+fn test_0conf_channel_with_async_monitor() {
+       // Test that we properly send out channel_ready in (both inbound- and outbound-) zero-conf
+       // channels if ChannelMonitor updates return a `TemporaryFailure` during the initial channel
+       // negotiation.
+
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(chan_config), None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+
+       chan_config.channel_options.announced_channel = false;
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(chan_config)).unwrap();
+       let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       assert_eq!(accept_channel.minimum_depth, 0);
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+       let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+       let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       let channel_id = funding_output.to_channel_id();
+       nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
+
+       let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(bs_signed_locked.len(), 2);
+       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+       match &bs_signed_locked[0] {
+               MessageSendEvent::SendFundingSigned { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
+                       check_added_monitors!(nodes[0], 1);
+               }
+               _ => panic!("Unexpected event"),
+       }
+       match &bs_signed_locked[1] {
+               MessageSendEvent::SendChannelReady { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
+               }
+               _ => panic!("Unexpected event"),
+       }
+
+       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       nodes[0].chain_monitor.complete_sole_pending_chan_update(&channel_id);
+       let as_locked_update = nodes[0].node.get_and_clear_pending_msg_events();
+
+       // Note that the funding transaction is actually released when
+       // get_and_clear_pending_msg_events, above, checks for monitor events.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], tx);
+
+       match &as_locked_update[0] {
+               MessageSendEvent::SendChannelReady { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &msg);
+               }
+               _ => panic!("Unexpected event"),
+       }
+       let bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
+
+       let as_channel_update = match &as_locked_update[1] {
+               MessageSendEvent::SendChannelUpdate { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       msg.clone()
+               }
+               _ => panic!("Unexpected event"),
+       };
+
+       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+
+       nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_channel_update);
+       nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_channel_update);
+
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+       assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       // Now that we have useful channels, try sending a payment where we hit a temporary monitor
+       // failure before we've ever confirmed the funding transaction. This previously caused a panic.
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       let as_send = SendEvent::from_node(&nodes[0]);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_send.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_send.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+
+       let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone();
+       nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
+       check_added_monitors!(nodes[1], 0);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+
+       let bs_send = SendEvent::from_node(&nodes[1]);
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_send.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[1], bs_send.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_payment_received!(nodes[2], payment_hash, payment_secret, 1_000_000);
+       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+
+       confirm_transaction(&nodes[0], &tx);
+       confirm_transaction(&nodes[1], &tx);
+
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+}
+
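
test_0conf_channel_with_async_monitor leans on the asynchronous persistence flow: the persister returns ChannelMonitorUpdateErr::TemporaryFailure, and the channel only makes progress again once the pending update is reported complete through channel_monitor_updated. A minimal test-harness-style sketch of that completion step, mirroring the calls made in the test (the helper name is illustrative):

        // Sketch only: mark the latest pending monitor update for `channel_id` as persisted
        // so the ChannelManager releases any messages or HTLCs it was holding back.
        fn complete_latest_monitor_update<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, channel_id: &[u8; 32]) {
                let (outpoint, _, latest_update) = node.chain_monitor.latest_monitor_update_id
                        .lock().unwrap().get(channel_id).unwrap().clone();
                node.chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
        }
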
+#[test]
+fn test_0conf_close_no_early_chan_update() {
+       // Tests that even with a public 0conf channel, we don't generate a channel_update on
+       // closing.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // This is the default but we force it on anyway
+       chan_config.channel_options.announced_channel = true;
+       open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config));
+
+       // We can use the channel immediately, but won't generate a channel_update until we get confs
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       nodes[0].node.force_close_all_channels();
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
+       let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
+}
+
+#[test]
+fn test_public_0conf_channel() {
+       // Tests that we will announce a public channel (after confirmation) even if it's 0conf.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // This is the default but we force it on anyway
+       chan_config.channel_options.announced_channel = true;
+       let tx = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config));
+
+       // We can use the channel immediately, but we can't announce it until we get 6+ confirmations
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       let scid = confirm_transaction(&nodes[0], &tx);
+       let as_announcement_sigs = get_event_msg!(nodes[0], MessageSendEvent::SendAnnouncementSignatures, nodes[1].node.get_our_node_id());
+       assert_eq!(confirm_transaction(&nodes[1], &tx), scid);
+       let bs_announcement_sigs = get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, nodes[0].node.get_our_node_id());
+
+       nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
+       nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
+
+       let bs_announcement = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(bs_announcement.len(), 1);
+       let announcement;
+       let bs_update;
+       match bs_announcement[0] {
+               MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+                       announcement = msg.clone();
+                       bs_update = update_msg.clone();
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let as_announcement = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(as_announcement.len(), 1);
+       match as_announcement[0] {
+               MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+                       assert!(announcement == *msg);
+                       assert_eq!(update_msg.contents.short_channel_id, scid);
+                       assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
+                       assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
+               },
+               _ => panic!("Unexpected event"),
+       };
+}
+
+#[test]
+fn test_0conf_channel_reorg() {
+       // If we accept a 0conf channel, which is then confirmed, but then changes SCID in a reorg, we
+       // have to make sure we handle this correctly (or, currently, just force-close the channel).
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // This is the default but we force it on anyway
+       chan_config.channel_options.announced_channel = true;
+       let tx = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config));
+
+       // We can use the channel immediately, but we can't announce it until we get 6+ confirmations
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       mine_transaction(&nodes[0], &tx);
+       mine_transaction(&nodes[1], &tx);
+
+       // Send a payment using the channel's real SCID, which will be public in a few blocks once we
+       // can generate a channel_announcement.
+       let real_scid = nodes[0].node.list_usable_channels()[0].short_channel_id.unwrap();
+       assert_eq!(nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), real_scid);
+
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000);
+       assert_eq!(route.paths[0][0].short_channel_id, real_scid);
+       send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1]]], 10_000, payment_hash, payment_secret);
+       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+
+       disconnect_blocks(&nodes[0], 1);
+       disconnect_blocks(&nodes[1], 1);
+
+       // At this point the channel once again has no SCID. In the future we should likely
+       // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for
+       // now we force-close the channel here.
+       check_closed_event!(&nodes[0], 1, ClosureReason::ProcessingError {
+               err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
+       });
+       check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(&nodes[1], 1, ClosureReason::ProcessingError {
+               err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
+       });
+       check_closed_broadcast!(nodes[1], true);
+}
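
For illustration only (not part of this diff): an application observes the force-close exercised by the test above as an `Event::ChannelClosed` carrying `ClosureReason::ProcessingError`. A hypothetical logging helper, assuming nothing beyond the event types shown in this change, might look like:

    use lightning::util::events::{ClosureReason, Event};

    /// Log channel closures, distinguishing the force-close-on-reorg case exercised above.
    fn log_closure(event: &Event) {
        if let Event::ChannelClosed { channel_id, reason, .. } = event {
            match reason {
                ClosureReason::ProcessingError { err } =>
                    println!("channel {:?} force-closed: {}", channel_id, err),
                _ => println!("channel {:?} closed: {:?}", channel_id, reason),
            }
        }
    }
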
index 8a4ec2dc3518a4d9ca67ae7040c500563f7d9e3c..0025598e4c2400702503fe072b1d8b7459f58b39 100644 (file)
@@ -60,10 +60,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
        connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
        // Provide preimage to node 2 by claiming payment
        nodes[2].node.claim_funds(our_payment_preimage);
+       expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000);
        check_added_monitors!(nodes[2], 1);
        get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 
@@ -314,6 +315,11 @@ fn test_unconf_chan() {
        do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
        do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
        do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+
+       do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
 }
 
 #[test]
@@ -331,6 +337,11 @@ fn test_unconf_chan_via_funding_unconfirmed() {
        do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
        do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
 
+       do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+
        do_test_unconf_chan(true, true, true, ConnectStyle::FullBlockViaListen);
        do_test_unconf_chan(false, true, true, ConnectStyle::FullBlockViaListen);
        do_test_unconf_chan(true, false, true, ConnectStyle::FullBlockViaListen);
@@ -348,8 +359,8 @@ fn test_set_outpoints_partial_claiming() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
-       let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
-       let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
        let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
@@ -363,9 +374,10 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node A to advance height towards TEST_FINAL_CLTV
        // Provide node A with both preimage
        nodes[0].node.claim_funds(payment_preimage_1);
+       expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000);
        nodes[0].node.claim_funds(payment_preimage_2);
+       expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000);
        check_added_monitors!(nodes[0], 2);
-       nodes[0].node.get_and_clear_pending_events();
        nodes[0].node.get_and_clear_pending_msg_events();
 
        // Connect blocks on node A commitment transaction
@@ -539,7 +551,9 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
 fn test_to_remote_after_local_detection() {
        do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirst);
        do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstSkippingBlocks);
+       do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstReorgsOnlyTip);
        do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirst);
        do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstSkippingBlocks);
+       do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstReorgsOnlyTip);
        do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
 }
index 063fa01608679e71011dbb39d89afbdfff1a4774..3dc245877e759fb8816ba0faa846f42386040f1a 100644 (file)
@@ -37,7 +37,7 @@ use ln::msgs::OptionalField::Present;
 
 #[test]
 fn pre_funding_lock_shutdown_test() {
-       // Test sending a shutdown prior to funding_locked after funding generation
+       // Test sending a shutdown prior to channel_ready after funding generation
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -81,7 +81,7 @@ fn updates_shutdown_wait() {
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
-       let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+       let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -101,8 +101,10 @@ fn updates_shutdown_wait() {
        unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
        unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
 
-       assert!(nodes[2].node.claim_funds(payment_preimage));
+       nodes[2].node.claim_funds(payment_preimage_0);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
+
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
@@ -122,7 +124,7 @@ fn updates_shutdown_wait() {
        assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
-       expect_payment_sent!(nodes[0], payment_preimage);
+       expect_payment_sent!(nodes[0], payment_preimage_0);
 
        let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
@@ -230,7 +232,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
 
-       let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
        nodes[1].node.close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -270,8 +272,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       assert!(nodes[2].node.claim_funds(payment_preimage));
+       nodes[2].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
index a0b549452830ecd85f05ba5fa76ab543ae29aa81..cbf5c77d60095d73abe235c7652eeccb8380cf04 100644 (file)
@@ -57,7 +57,7 @@ pub(crate) enum Message<T> where T: core::fmt::Debug + Type + TestEq {
        AcceptChannel(msgs::AcceptChannel),
        FundingCreated(msgs::FundingCreated),
        FundingSigned(msgs::FundingSigned),
-       FundingLocked(msgs::FundingLocked),
+       ChannelReady(msgs::ChannelReady),
        Shutdown(msgs::Shutdown),
        ClosingSigned(msgs::ClosingSigned),
        UpdateAddHTLC(msgs::UpdateAddHTLC),
@@ -97,7 +97,7 @@ impl<T> Message<T> where T: core::fmt::Debug + Type + TestEq {
                        &Message::AcceptChannel(ref msg) => msg.type_id(),
                        &Message::FundingCreated(ref msg) => msg.type_id(),
                        &Message::FundingSigned(ref msg) => msg.type_id(),
-                       &Message::FundingLocked(ref msg) => msg.type_id(),
+                       &Message::ChannelReady(ref msg) => msg.type_id(),
                        &Message::Shutdown(ref msg) => msg.type_id(),
                        &Message::ClosingSigned(ref msg) => msg.type_id(),
                        &Message::UpdateAddHTLC(ref msg) => msg.type_id(),
@@ -176,8 +176,8 @@ fn do_read<R: io::Read, T, H: core::ops::Deref>(buffer: &mut R, message_type: u1
                msgs::FundingSigned::TYPE => {
                        Ok(Message::FundingSigned(Readable::read(buffer)?))
                },
-               msgs::FundingLocked::TYPE => {
-                       Ok(Message::FundingLocked(Readable::read(buffer)?))
+               msgs::ChannelReady::TYPE => {
+                       Ok(Message::ChannelReady(Readable::read(buffer)?))
                },
                msgs::Shutdown::TYPE => {
                        Ok(Message::Shutdown(Readable::read(buffer)?))
@@ -332,7 +332,7 @@ impl Encode for msgs::FundingSigned {
        const TYPE: u16 = 35;
 }
 
-impl Encode for msgs::FundingLocked {
+impl Encode for msgs::ChannelReady {
        const TYPE: u16 = 36;
 }
 
index d85485fe82600b351e927db598889ea96a81ce1a..8731e66da9bd866bd82d00bc42c8922cd25508e8 100644 (file)
@@ -19,7 +19,7 @@ use ln::features::{ChannelFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
 use routing::scoring::{ChannelUsage, Score};
 use routing::network_graph::{DirectedChannelInfoWithUpdate, EffectiveCapacity, NetworkGraph, ReadOnlyNetworkGraph, NodeId, RoutingFees};
-use util::ser::{Writeable, Readable};
+use util::ser::{Writeable, Readable, Writer};
 use util::logger::{Level, Logger};
 use util::chacha20::ChaCha20;
 
@@ -151,8 +151,8 @@ impl Readable for Route {
 
 /// Parameters needed to find a [`Route`].
 ///
-/// Passed to [`find_route`] and also provided in [`Event::PaymentPathFailed`] for retrying a failed
-/// payment path.
+/// Passed to [`find_route`] and [`build_route_from_hops`], but also provided in
+/// [`Event::PaymentPathFailed`] for retrying a failed payment path.
 ///
 /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
 #[derive(Clone, Debug)]
@@ -383,7 +383,7 @@ enum CandidateRouteHop<'a> {
 impl<'a> CandidateRouteHop<'a> {
        fn short_channel_id(&self) -> u64 {
                match self {
-                       CandidateRouteHop::FirstHop { details } => details.short_channel_id.unwrap(),
+                       CandidateRouteHop::FirstHop { details } => details.get_outbound_payment_scid().unwrap(),
                        CandidateRouteHop::PublicHop { short_channel_id, .. } => *short_channel_id,
                        CandidateRouteHop::PrivateHop { hint } => hint.short_channel_id,
                }
@@ -676,16 +676,11 @@ pub fn find_route<L: Deref, S: Score>(
 ) -> Result<Route, LightningError>
 where L::Target: Logger {
        let network_graph = network.read_only();
-       match get_route(
-               our_node_pubkey, &route_params.payment_params, &network_graph, first_hops, route_params.final_value_msat,
-               route_params.final_cltv_expiry_delta, logger, scorer, random_seed_bytes
-       ) {
-               Ok(mut route) => {
-                       add_random_cltv_offset(&mut route, &route_params.payment_params, &network_graph, random_seed_bytes);
-                       Ok(route)
-               },
-               Err(err) => Err(err),
-       }
+       let mut route = get_route(our_node_pubkey, &route_params.payment_params, &network_graph, first_hops,
+               route_params.final_value_msat, route_params.final_cltv_expiry_delta, logger, scorer,
+               random_seed_bytes)?;
+       add_random_cltv_offset(&mut route, &route_params.payment_params, &network_graph, random_seed_bytes);
+       Ok(route)
 }
 
 pub(crate) fn get_route<L: Deref, S: Score>(
@@ -806,7 +801,7 @@ where L::Target: Logger {
                HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
        if let Some(hops) = first_hops {
                for chan in hops {
-                       if chan.short_channel_id.is_none() {
+                       if chan.get_outbound_payment_scid().is_none() {
                                panic!("first_hops should be filled in with usable channels, not pending ones");
                        }
                        if chan.counterparty.node_id == *our_node_pubkey {
@@ -1416,7 +1411,7 @@ where L::Target: Logger {
                                        let mut features_set = false;
                                        if let Some(first_channels) = first_hop_targets.get(&ordered_hops.last().unwrap().0.node_id) {
                                                for details in first_channels {
-                                                       if details.short_channel_id.unwrap() == ordered_hops.last().unwrap().0.candidate.short_channel_id() {
+                                                       if details.get_outbound_payment_scid().unwrap() == ordered_hops.last().unwrap().0.candidate.short_channel_id() {
                                                                ordered_hops.last_mut().unwrap().1 = details.counterparty.features.to_context();
                                                                features_set = true;
                                                                break;
@@ -1703,7 +1698,9 @@ where L::Target: Logger {
 // destination, if the remaining CLTV expiry delta exactly matches a feasible path in the network
 // graph. In order to improve privacy, this method obfuscates the CLTV expiry deltas along the
 // payment path by adding a randomized 'shadow route' offset to the final hop.
-fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph, random_seed_bytes: &[u8; 32]) {
+fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters,
+       network_graph: &ReadOnlyNetworkGraph, random_seed_bytes: &[u8; 32]
+) {
        let network_channels = network_graph.channels();
        let network_nodes = network_graph.nodes();
 
@@ -1785,10 +1782,84 @@ fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters,
        }
 }
 
+/// Construct a route from us (payer) to the target node (payee) via the given hops (which should
+/// exclude the payer, but include the payee). This may be useful, e.g., for probing the chosen path.
+///
+/// Re-uses logic from `find_route`, so the restrictions described there also apply here.
+pub fn build_route_from_hops<L: Deref>(
+       our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters, network: &NetworkGraph,
+       logger: L, random_seed_bytes: &[u8; 32]
+) -> Result<Route, LightningError>
+where L::Target: Logger {
+       let network_graph = network.read_only();
+       let mut route = build_route_from_hops_internal(
+               our_node_pubkey, hops, &route_params.payment_params, &network_graph,
+               route_params.final_value_msat, route_params.final_cltv_expiry_delta, logger, random_seed_bytes)?;
+       add_random_cltv_offset(&mut route, &route_params.payment_params, &network_graph, random_seed_bytes);
+       Ok(route)
+}
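
For illustration, a minimal usage sketch of the new `build_route_from_hops` API (not part of this diff). The `probe_path` helper, the `final_cltv_expiry_delta` value, and the surrounding plumbing (`NetworkGraph`, `Logger`, random seed bytes) are assumptions supplied by the caller:

    use bitcoin::secp256k1::PublicKey;
    use lightning::ln::msgs::LightningError;
    use lightning::routing::network_graph::NetworkGraph;
    use lightning::routing::router::{build_route_from_hops, PaymentParameters, Route, RouteParameters};
    use lightning::util::logger::Logger;

    /// Build a route along a fixed path (payer excluded, payee last), e.g. for probing.
    fn probe_path<L: core::ops::Deref>(
        our_pubkey: &PublicKey, hops: &[PublicKey], amount_msat: u64, graph: &NetworkGraph,
        logger: L, random_seed_bytes: &[u8; 32],
    ) -> Result<Route, LightningError> where L::Target: Logger {
        let payee = *hops.last().expect("hops must include the payee");
        let route_params = RouteParameters {
            payment_params: PaymentParameters::from_node_id(payee),
            final_value_msat: amount_msat,
            final_cltv_expiry_delta: 40, // arbitrary example value
        };
        build_route_from_hops(our_pubkey, hops, &route_params, graph, logger, random_seed_bytes)
    }
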
+
+fn build_route_from_hops_internal<L: Deref>(
+       our_node_pubkey: &PublicKey, hops: &[PublicKey], payment_params: &PaymentParameters,
+       network_graph: &ReadOnlyNetworkGraph, final_value_msat: u64, final_cltv_expiry_delta: u32,
+       logger: L, random_seed_bytes: &[u8; 32]
+) -> Result<Route, LightningError> where L::Target: Logger {
+
+       struct HopScorer {
+               our_node_id: NodeId,
+               hop_ids: [Option<NodeId>; MAX_PATH_LENGTH_ESTIMATE as usize],
+       }
+
+       impl Score for HopScorer {
+               fn channel_penalty_msat(&self, _short_channel_id: u64, source: &NodeId, target: &NodeId,
+                       _usage: ChannelUsage) -> u64
+               {
+                       let mut cur_id = self.our_node_id;
+                       for i in 0..self.hop_ids.len() {
+                               if let Some(next_id) = self.hop_ids[i] {
+                                       if cur_id == *source && next_id == *target {
+                                               return 0;
+                                       }
+                                       cur_id = next_id;
+                               } else {
+                                       break;
+                               }
+                       }
+                       u64::max_value()
+               }
+
+               fn payment_path_failed(&mut self, _path: &[&RouteHop], _short_channel_id: u64) {}
+
+               fn payment_path_successful(&mut self, _path: &[&RouteHop]) {}
+       }
+
+       impl<'a> Writeable for HopScorer {
+               #[inline]
+               fn write<W: Writer>(&self, _w: &mut W) -> Result<(), io::Error> {
+                       unreachable!();
+               }
+       }
+
+       if hops.len() > MAX_PATH_LENGTH_ESTIMATE.into() {
+               return Err(LightningError{err: "Cannot build a route exceeding the maximum path length.".to_owned(), action: ErrorAction::IgnoreError});
+       }
+
+       let our_node_id = NodeId::from_pubkey(our_node_pubkey);
+       let mut hop_ids = [None; MAX_PATH_LENGTH_ESTIMATE as usize];
+       for i in 0..hops.len() {
+               hop_ids[i] = Some(NodeId::from_pubkey(&hops[i]));
+       }
+
+       let scorer = HopScorer { our_node_id, hop_ids };
+
+       get_route(our_node_pubkey, payment_params, network_graph, None, final_value_msat,
+               final_cltv_expiry_delta, logger, &scorer, random_seed_bytes)
+}
+
 #[cfg(test)]
 mod tests {
        use routing::network_graph::{NetworkGraph, NetGraphMsgHandler, NodeId};
-       use routing::router::{get_route, add_random_cltv_offset, default_node_features,
+       use routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
                PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
                DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
        use routing::scoring::{ChannelUsage, Score};
@@ -1835,6 +1906,7 @@ mod tests {
                        funding_txo: Some(OutPoint { txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0 }),
                        channel_type: None,
                        short_channel_id,
+                       outbound_scid_alias: None,
                        inbound_scid_alias: None,
                        channel_value_satoshis: 0,
                        user_channel_id: 0,
@@ -1845,7 +1917,7 @@ mod tests {
                        unspendable_punishment_reserve: None,
                        confirmations_required: None,
                        force_close_spend_delay: None,
-                       is_outbound: true, is_funding_locked: true,
+                       is_outbound: true, is_channel_ready: true,
                        is_usable: true, is_public: true,
                        inbound_htlc_minimum_msat: None,
                        inbound_htlc_maximum_msat: None,
@@ -5486,6 +5558,26 @@ mod tests {
                assert!(path_plausibility.iter().all(|x| *x));
        }
 
+       #[test]
+       fn builds_correct_path_from_hops() {
+               let (secp_ctx, network, _, _, logger) = build_graph();
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let network_graph = network.read_only();
+
+               let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+               let payment_params = PaymentParameters::from_node_id(nodes[3]);
+               let hops = [nodes[1], nodes[2], nodes[4], nodes[3]];
+               let route = build_route_from_hops_internal(&our_id, &hops, &payment_params,
+                        &network_graph, 100, 0, Arc::clone(&logger), &random_seed_bytes).unwrap();
+               let route_hop_pubkeys = route.paths[0].iter().map(|hop| hop.pubkey).collect::<Vec<_>>();
+               assert_eq!(hops.len(), route.paths[0].len());
+               for (idx, hop_pubkey) in hops.iter().enumerate() {
+                       assert!(*hop_pubkey == route_hop_pubkeys[idx]);
+               }
+       }
+
        #[cfg(not(feature = "no-std"))]
        pub(super) fn random_init_seed() -> u64 {
                // Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
@@ -5647,6 +5739,7 @@ mod benches {
                        channel_type: None,
                        short_channel_id: Some(1),
                        inbound_scid_alias: None,
+                       outbound_scid_alias: None,
                        channel_value_satoshis: 10_000_000,
                        user_channel_id: 0,
                        balance_msat: 10_000_000,
@@ -5657,7 +5750,7 @@ mod benches {
                        confirmations_required: None,
                        force_close_spend_delay: None,
                        is_outbound: true,
-                       is_funding_locked: true,
+                       is_channel_ready: true,
                        is_usable: true,
                        is_public: true,
                        inbound_htlc_minimum_msat: None,
index 3868d29aab49f66789c49c970a662cba19000e2e..bdd222e31c58f0ab921ae2f3f057d8264b68b560 100644 (file)
@@ -22,7 +22,15 @@ pub struct ChannelHandshakeConfig {
        /// Applied only for inbound channels (see ChannelHandshakeLimits::max_minimum_depth for the
        /// equivalent limit applied to outbound channels).
        ///
+       /// A lower-bound of 1 is applied, requiring all channels to have a confirmed commitment
+       /// transaction before operation. If you wish to accept channels with zero confirmations, see
+       /// [`UserConfig::manually_accept_inbound_channels`] and
+       /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`].
+       ///
        /// Default value: 6.
+       ///
+       /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+       /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf
        pub minimum_depth: u32,
        /// Set to the number of blocks we require our counterparty to wait to claim their money (ie
        /// the number of blocks we have to punish our counterparty if they broadcast a revoked
@@ -159,6 +167,24 @@ pub struct ChannelHandshakeLimits {
        ///
        /// Default value: 144, or roughly one day and only applies to outbound channels.
        pub max_minimum_depth: u32,
+       /// Whether we implicitly trust funding transactions generated by us for our own outbound
+       /// channels to not be double-spent.
+       ///
+       /// If this is set, we assume that our own funding transactions are *never* double-spent, and
+       /// thus we can trust them without any confirmations. This is generally a reasonable
+       /// assumption, given we're the only ones who could ever double-spend it (assuming we have sole
+       /// control of the signing keys).
+       ///
+       /// You may wish to un-set this if you allow the user to (or do in an automated fashion)
+       /// double-spend the funding transaction to RBF with an alternative channel open.
+       ///
+       /// This only applies if our counterparty set their confirmations-required value to 0, and we
+       /// always trust our own funding transaction at 1 confirmation irrespective of this value.
+       /// Thus, this effectively acts as a `min_minimum_depth`, with the only possible values being
+       /// `true` (0) and `false` (1).
+       ///
+       /// Default value: true
+       pub trust_own_funding_0conf: bool,
        /// Set to force an incoming channel to match our announced channel preference in
        /// [`ChannelConfig::announced_channel`].
        ///
@@ -187,6 +213,7 @@ impl Default for ChannelHandshakeLimits {
                        min_max_htlc_value_in_flight_msat: 0,
                        max_channel_reserve_satoshis: <u64>::max_value(),
                        min_max_accepted_htlcs: 0,
+                       trust_own_funding_0conf: true,
                        max_minimum_depth: 144,
                        force_announced_channel_preference: true,
                        their_to_self_delay: MAX_LOCAL_BREAKDOWN_TIMEOUT,
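
As context for the new 0conf-related knobs above, a small illustrative sketch (not part of this change) of a `UserConfig` set up for manually accepting inbound channels, which is the prerequisite for `accept_inbound_channel_from_trusted_peer_0conf`. Field names reflect the config structs as of this version and the helper name is made up:

    use lightning::util::config::UserConfig;

    /// A config suitable for manually accepting inbound channels (including, if the
    /// peer is trusted, 0conf ones via accept_inbound_channel_from_trusted_peer_0conf).
    fn manual_accept_config() -> UserConfig {
        let mut config = UserConfig::default();
        // Required so OpenChannelRequest events are generated for inbound channels.
        config.manually_accept_inbound_channels = true;
        // Already the default: keep trusting our own outbound funding at 0 confs.
        config.peer_channel_config_limits.trust_own_funding_0conf = true;
        config
    }
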
index ef0c619b02e9f7a1b03cd6dd14b439e7799264dc..0a886b93f6e01eb3dc70c967bc8d439ee3805b84 100644 (file)
@@ -66,6 +66,14 @@ pub enum PaymentPurpose {
        SpontaneousPayment(PaymentPreimage),
 }
 
+impl_writeable_tlv_based_enum!(PaymentPurpose,
+       (0, InvoicePayment) => {
+               (0, payment_preimage, option),
+               (2, payment_secret, required),
+       };
+       (2, SpontaneousPayment)
+);
+
 #[derive(Clone, Debug, PartialEq)]
 /// The reason the channel was closed. See individual variants for more details.
 pub enum ClosureReason {
@@ -180,8 +188,9 @@ pub enum Event {
                /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
                user_channel_id: u64,
        },
-       /// Indicates we've received money! Just gotta dig out that payment preimage and feed it to
-       /// [`ChannelManager::claim_funds`] to get it....
+       /// Indicates we've received (an offer of) money! Just gotta dig out that payment preimage and
+       /// feed it to [`ChannelManager::claim_funds`] to get it....
+       ///
        /// Note that if the preimage is not known, you should call
        /// [`ChannelManager::fail_htlc_backwards`] to free up resources for this HTLC and avoid
        /// network congestion.
@@ -200,11 +209,35 @@ pub enum Event {
                /// not stop you from registering duplicate payment hashes for inbound payments.
                payment_hash: PaymentHash,
                /// The value, in thousandths of a satoshi, that this payment is for.
-               amt: u64,
+               amount_msat: u64,
                /// Information for claiming this received payment, based on whether the purpose of the
                /// payment is to pay an invoice or to send a spontaneous payment.
                purpose: PaymentPurpose,
        },
+       /// Indicates a payment has been claimed and we've received money!
+       ///
+       /// This most likely occurs when [`ChannelManager::claim_funds`] has been called in response
+       /// to an [`Event::PaymentReceived`]. However, if we previously crashed during a
+       /// [`ChannelManager::claim_funds`] call you may see this event without a corresponding
+       /// [`Event::PaymentReceived`] event.
+       ///
+       /// # Note
+       /// LDK will not stop an inbound payment from being paid multiple times, so multiple
+       /// `PaymentReceived` events may be generated for the same payment. If you then call
+       /// [`ChannelManager::claim_funds`] twice for the same [`Event::PaymentReceived`] you may get
+       /// multiple `PaymentClaimed` events.
+       ///
+       /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+       PaymentClaimed {
+               /// The payment hash of the claimed payment. Note that LDK will not stop you from
+               /// registering duplicate payment hashes for inbound payments.
+               payment_hash: PaymentHash,
+               /// The value, in thousandths of a satoshi, that this payment is for.
+               amount_msat: u64,
+               /// The purpose of this claimed payment, i.e. whether the payment was for an invoice or a
+               /// spontaneous payment.
+               purpose: PaymentPurpose,
+       },
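
To illustrate how the new `PaymentClaimed` event relates to `PaymentReceived`, a minimal, hypothetical event-handling sketch follows; the `claim` closure stands in for a real `ChannelManager::claim_funds` call and the handler shape is an assumption, not prescribed by LDK:

    use lightning::ln::PaymentPreimage;
    use lightning::util::events::{Event, PaymentPurpose};

    /// Claim inbound payments as they arrive and log the follow-up PaymentClaimed event.
    fn handle_payment_event<F: Fn(PaymentPreimage)>(event: &Event, claim: F) {
        match event {
            Event::PaymentReceived { purpose, .. } => {
                // For invoice payments the preimage may already be known to us.
                if let PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), .. } = purpose {
                    claim(*preimage);
                }
            },
            Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
                println!("claimed {} msat for payment {:?}", amount_msat, payment_hash);
            },
            _ => {},
        }
    }
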
        /// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
        /// and we got back the payment preimage for it).
        ///
@@ -331,6 +364,10 @@ pub enum Event {
                path: Vec<RouteHop>,
                /// The channel responsible for the failed payment path.
                ///
+               /// Note that for route hints or for the first hop in a path this may be an SCID alias and
+               /// may not refer to a channel in the public network graph. These aliases may also collide
+               /// with channels in the public network graph.
+               ///
                /// If this is `Some`, then the corresponding channel should be avoided when the payment is
                /// retried. May be `None` for older [`Event`] serializations.
                short_channel_id: Option<u64>,
@@ -475,7 +512,7 @@ impl Writeable for Event {
                                // We never write out FundingGenerationReady events as, upon disconnection, peers
                                // drop any channels which have not yet exchanged funding_signed.
                        },
-                       &Event::PaymentReceived { ref payment_hash, ref amt, ref purpose } => {
+                       &Event::PaymentReceived { ref payment_hash, ref amount_msat, ref purpose } => {
                                1u8.write(writer)?;
                                let mut payment_secret = None;
                                let payment_preimage;
@@ -491,7 +528,7 @@ impl Writeable for Event {
                                write_tlv_fields!(writer, {
                                        (0, payment_hash, required),
                                        (2, payment_secret, option),
-                                       (4, amt, required),
+                                       (4, amount_msat, required),
                                        (6, 0u64, required), // user_payment_id required for compatibility with 0.0.103 and earlier
                                        (8, payment_preimage, option),
                                });
@@ -584,6 +621,14 @@ impl Writeable for Event {
                                // We never write the OpenChannelRequest events as, upon disconnection, peers
                                // drop any channels which have not yet exchanged funding_signed.
                        },
+                       &Event::PaymentClaimed { ref payment_hash, ref amount_msat, ref purpose } => {
+                               19u8.write(writer)?;
+                               write_tlv_fields!(writer, {
+                                       (0, payment_hash, required),
+                                       (2, purpose, required),
+                                       (4, amount_msat, required),
+                               });
+                       },
                        // Note that, going forward, all new events must only write data inside of
                        // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
                        // data via `write_tlv_fields`.
@@ -602,12 +647,12 @@ impl MaybeReadable for Event {
                                        let mut payment_hash = PaymentHash([0; 32]);
                                        let mut payment_preimage = None;
                                        let mut payment_secret = None;
-                                       let mut amt = 0;
+                                       let mut amount_msat = 0;
                                        let mut _user_payment_id = None::<u64>; // For compatibility with 0.0.103 and earlier
                                        read_tlv_fields!(reader, {
                                                (0, payment_hash, required),
                                                (2, payment_secret, option),
-                                               (4, amt, required),
+                                               (4, amount_msat, required),
                                                (6, _user_payment_id, option),
                                                (8, payment_preimage, option),
                                        });
@@ -621,7 +666,7 @@ impl MaybeReadable for Event {
                                        };
                                        Ok(Some(Event::PaymentReceived {
                                                payment_hash,
-                                               amt,
+                                               amount_msat,
                                                purpose,
                                        }))
                                };
@@ -784,6 +829,25 @@ impl MaybeReadable for Event {
                                // Value 17 is used for `Event::OpenChannelRequest`.
                                Ok(None)
                        },
+                       19u8 => {
+                               let f = || {
+                                       let mut payment_hash = PaymentHash([0; 32]);
+                                       let mut purpose = None;
+                                       let mut amount_msat = 0;
+                                       read_tlv_fields!(reader, {
+                                               (0, payment_hash, required),
+                                               (2, purpose, ignorable),
+                                               (4, amount_msat, required),
+                                       });
+                                       if purpose.is_none() { return Ok(None); }
+                                       Ok(Some(Event::PaymentClaimed {
+                                               payment_hash,
+                                               purpose: purpose.unwrap(),
+                                               amount_msat,
+                                       }))
+                               };
+                               f()
+                       },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
                        // reads.
@@ -838,12 +902,12 @@ pub enum MessageSendEvent {
                /// The message which should be sent.
                msg: msgs::FundingSigned,
        },
-       /// Used to indicate that a funding_locked message should be sent to the peer with the given node_id.
-       SendFundingLocked {
+       /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id.
+       SendChannelReady {
                /// The node_id of the node which should receive these message(s)
                node_id: PublicKey,
-               /// The funding_locked message which should be sent.
-               msg: msgs::FundingLocked,
+               /// The channel_ready message which should be sent.
+               msg: msgs::ChannelReady,
        },
        /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id.
        SendAnnouncementSignatures {
index 8552358c35ae8d8fe8532f66cd9346409405c2c5..676c303bfa8dc1384b404cfe355aa77d99990242 100644 (file)
@@ -79,6 +79,10 @@ pub(crate) mod fake_scid {
        const MAX_NAMESPACES: u8 = 8; // We allocate 3 bits for the namespace identifier.
        const NAMESPACE_ID_BITMASK: u8 = 0b111;
 
+       const BLOCKS_PER_MONTH: u32 = 144 /* blocks per day */ * 30 /* days per month */;
+       pub(crate) const MAX_SCID_BLOCKS_FROM_NOW: u32 = BLOCKS_PER_MONTH;
+
+
        /// Fake scids are divided into namespaces, with each namespace having its own identifier between
        /// [0..7]. This allows us to identify what namespace a fake scid corresponds to upon HTLC
        /// receipt, and handle the HTLC accordingly. The namespace identifier is encrypted when encoded
@@ -100,7 +104,6 @@ pub(crate) mod fake_scid {
                        // Ensure we haven't created a namespace that doesn't fit into the 3 bits we've allocated for
                        // namespaces.
                        assert!((*self as u8) < MAX_NAMESPACES);
-                       const BLOCKS_PER_MONTH: u32 = 144 /* blocks per day */ * 30 /* days per month */;
                        let rand_bytes = keys_manager.get_secure_random_bytes();
 
                        let segwit_activation_height = segwit_activation_height(genesis_hash);
@@ -109,7 +112,7 @@ pub(crate) mod fake_scid {
                        // We want to ensure that this fake channel won't conflict with any transactions we haven't
                        // seen yet, in case `highest_seen_blockheight` is updated before we get full information
                        // about transactions confirmed in the given block.
-                       blocks_since_segwit_activation = blocks_since_segwit_activation.saturating_sub(BLOCKS_PER_MONTH);
+                       blocks_since_segwit_activation = blocks_since_segwit_activation.saturating_sub(MAX_SCID_BLOCKS_FROM_NOW);
 
                        let rand_for_height = u32::from_be_bytes(rand_bytes[..4].try_into().unwrap());
                        let fake_scid_height = segwit_activation_height + rand_for_height % (blocks_since_segwit_activation + 1);
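
As a standalone illustration of the height randomization above (just the arithmetic mirrored outside LDK, not its API): with a one-month margin of 144 * 30 = 4,320 blocks, the fake SCID height is drawn uniformly between segwit activation and roughly one month below the current tip:

    const BLOCKS_PER_MONTH: u32 = 144 /* blocks per day */ * 30 /* days per month */;

    /// Pick a plausible-looking block height for a fake SCID, mirroring the logic above.
    fn fake_scid_height(segwit_activation_height: u32, best_height: u32, rand_for_height: u32) -> u32 {
        let mut blocks_since_activation = best_height.saturating_sub(segwit_activation_height);
        // Keep ~one month of headroom so the fake height cannot collide with a channel
        // confirmed in a block we have not fully processed yet.
        blocks_since_activation = blocks_since_activation.saturating_sub(BLOCKS_PER_MONTH);
        segwit_activation_height + rand_for_height % (blocks_since_activation + 1)
    }
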
index 3682a0e8f0af02d1ac654853bff16e3096f4025a..4f3d800be5dd8479f79a7999318fc627017e7078 100644 (file)
@@ -31,7 +31,7 @@ use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::Block;
 use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
 
@@ -116,6 +116,11 @@ impl<'a> TestChainMonitor<'a> {
                        expect_channel_force_closed: Mutex::new(None),
                }
        }
+
+       pub fn complete_sole_pending_chan_update(&self, channel_id: &[u8; 32]) {
+               let (outpoint, _, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
+               self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
+       }
 }
 impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
        fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
@@ -224,11 +229,11 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
 
 pub struct TestBroadcaster {
        pub txn_broadcasted: Mutex<Vec<Transaction>>,
-       pub blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>,
+       pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
 }
 
 impl TestBroadcaster {
-       pub fn new(blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>) -> TestBroadcaster {
+       pub fn new(blocks: Arc<Mutex<Vec<(Block, u32)>>>) -> TestBroadcaster {
                TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks }
        }
 }
@@ -302,8 +307,8 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
        fn handle_funding_signed(&self, _their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
                self.received_msg(wire::Message::FundingSigned(msg.clone()));
        }
-       fn handle_funding_locked(&self, _their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
-               self.received_msg(wire::Message::FundingLocked(msg.clone()));
+       fn handle_channel_ready(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReady) {
+               self.received_msg(wire::Message::ChannelReady(msg.clone()));
        }
        fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) {
                self.received_msg(wire::Message::Shutdown(msg.clone()));