Merge pull request #2014 from valentinewallace/2023-02-rework-partial-pmt-fail
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Thu, 23 Feb 2023 21:54:16 +0000 (21:54 +0000)
committer GitHub <noreply@github.com>
Thu, 23 Feb 2023 21:54:16 +0000 (21:54 +0000)
Rework auto-retry send errors

26 files changed:
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/indexedmap.rs
lightning-background-processor/src/lib.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/onion_message/functional_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/routing/gossip.rs
lightning/src/util/indexed_map.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs
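
Two interface changes recur throughout the hunks below: the message-handler traits' peer_connected now takes a third inbound: bool argument (visible in the trait stubs in the lightning-net-tokio tests), and peer_disconnected no longer takes a no_connection_possible flag. A minimal call-site sketch, reassembled from the test hunks below (hypothetical standalone snippet, not part of the commit; consult the trait docs for the precise meaning of the boolean — here true/false simply mirror how the test harness marks the two sides of a connection):

        // Reworked connect calls: the new third argument is the `inbound` flag.
        nodes[0].node.peer_connected(
                &nodes[1].node.get_our_node_id(),
                &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None },
                true,
        ).unwrap();
        nodes[1].node.peer_connected(
                &nodes[0].node.get_our_node_id(),
                &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None },
                false,
        ).unwrap();
        // Disconnection no longer carries a `no_connection_possible` flag:
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());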

fuzz/src/chanmon_consistency.rs
index 39887e838401c545a6002e128d58c6330eb108d2..4386302f845b0d95279bbba99c975dede237df8a 100644 (file)
@@ -295,7 +295,7 @@ fn check_api_err(api_err: APIError) {
                        // all others. If you hit this panic, the list of acceptable errors
                        // is probably just stale and you should add new messages here.
                        match err.as_str() {
-                               "Peer for first hop currently disconnected/pending monitor update!" => {},
+                               "Peer for first hop currently disconnected" => {},
                                _ if err.starts_with("Cannot push more than their max accepted HTLCs ") => {},
                                _ if err.starts_with("Cannot send value that would put us over the max HTLC value in flight our peer will accept ") => {},
                                _ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
@@ -474,8 +474,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
        let mut channel_txn = Vec::new();
        macro_rules! make_channel {
                ($source: expr, $dest: expr, $chan_id: expr) => { {
-                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }).unwrap();
-                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }).unwrap();
+                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: $dest.init_features(), remote_network_address: None }, true).unwrap();
+                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: $source.init_features(), remote_network_address: None }, false).unwrap();
 
                        $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
                        let open_channel = {
@@ -979,31 +979,31 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                        0x0c => {
                                if !chan_a_disconnected {
-                                       nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
-                                       nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+                                       nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
+                                       nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
                                        chan_a_disconnected = true;
                                        drain_msg_events_on_disconnect!(0);
                                }
                        },
                        0x0d => {
                                if !chan_b_disconnected {
-                                       nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
-                                       nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+                                       nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
+                                       nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
                                        chan_b_disconnected = true;
                                        drain_msg_events_on_disconnect!(2);
                                }
                        },
                        0x0e => {
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_a_disconnected = false;
                                }
                        },
                        0x0f => {
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_b_disconnected = false;
                                }
                        },
@@ -1040,7 +1040,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                        0x2c => {
                                if !chan_a_disconnected {
-                                       nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+                                       nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
                                        chan_a_disconnected = true;
                                        drain_msg_events_on_disconnect!(0);
                                }
@@ -1054,14 +1054,14 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        },
                        0x2d => {
                                if !chan_a_disconnected {
-                                       nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+                                       nodes[0].peer_disconnected(&nodes[1].get_our_node_id());
                                        chan_a_disconnected = true;
                                        nodes[0].get_and_clear_pending_msg_events();
                                        ab_events.clear();
                                        ba_events.clear();
                                }
                                if !chan_b_disconnected {
-                                       nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+                                       nodes[2].peer_disconnected(&nodes[1].get_our_node_id());
                                        chan_b_disconnected = true;
                                        nodes[2].get_and_clear_pending_msg_events();
                                        bc_events.clear();
@@ -1073,7 +1073,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        },
                        0x2e => {
                                if !chan_b_disconnected {
-                                       nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+                                       nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
                                        chan_b_disconnected = true;
                                        drain_msg_events_on_disconnect!(2);
                                }
@@ -1198,13 +1198,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                                // Next, make sure peers are all connected to each other
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: nodes[0].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_a_disconnected = false;
                                }
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }).unwrap();
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }).unwrap();
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), remote_network_address: None }, true).unwrap();
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), remote_network_address: None }, false).unwrap();
                                        chan_b_disconnected = false;
                                }
 
fuzz/src/full_stack.rs
index 9b3b76c2b3a7d4e60f9d7354406411b2b42a294f..ca42466880a110e3e0e24873efe85f377a4974ad 100644 (file)
@@ -634,11 +634,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                                        if let Err(e) = channelmanager.funding_transaction_generated(&funding_generation.0, &funding_generation.1, tx.clone()) {
                                                // It's possible the channel has been closed in the mean time, but any other
                                                // failure may be a bug.
-                                               if let APIError::ChannelUnavailable { err } = e {
-                                                       if !err.starts_with("Can't find a peer matching the passed counterparty node_id ") {
-                                                               assert_eq!(err, "No such channel");
-                                                       }
-                                               } else { panic!(); }
+                                               if let APIError::ChannelUnavailable { .. } = e { } else { panic!(); }
                                        }
                                        pending_funding_signatures.insert(funding_output, tx);
                                }
fuzz/src/indexedmap.rs
index 795d6175bb5dc2dd8aee2b0cdce76ccc6a95c12b..7cbb8957ad247f807ff91f8851af44f0b8a546a8 100644 (file)
@@ -13,14 +13,27 @@ use hashbrown::HashSet;
 
 use crate::utils::test_logger;
 
-fn check_eq(btree: &BTreeMap<u8, u8>, indexed: &IndexedMap<u8, u8>) {
+use std::ops::{RangeBounds, Bound};
+
+struct ExclLowerInclUpper(u8, u8);
+impl RangeBounds<u8> for ExclLowerInclUpper {
+       fn start_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.0) }
+       fn end_bound(&self) -> Bound<&u8> { Bound::Included(&self.1) }
+}
+struct ExclLowerExclUpper(u8, u8);
+impl RangeBounds<u8> for ExclLowerExclUpper {
+       fn start_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.0) }
+       fn end_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.1) }
+}
+
+fn check_eq(btree: &BTreeMap<u8, u8>, mut indexed: IndexedMap<u8, u8>) {
        assert_eq!(btree.len(), indexed.len());
        assert_eq!(btree.is_empty(), indexed.is_empty());
 
        let mut btree_clone = btree.clone();
        assert!(btree_clone == *btree);
        let mut indexed_clone = indexed.clone();
-       assert!(indexed_clone == *indexed);
+       assert!(indexed_clone == indexed);
 
        for k in 0..=255 {
                assert_eq!(btree.contains_key(&k), indexed.contains_key(&k));
@@ -43,16 +56,27 @@ fn check_eq(btree: &BTreeMap<u8, u8>, indexed: &IndexedMap<u8, u8>) {
        }
 
        const STRIDE: u8 = 16;
-       for k in 0..=255/STRIDE {
-               let lower_bound = k * STRIDE;
-               let upper_bound = lower_bound + (STRIDE - 1);
-               let mut btree_iter = btree.range(lower_bound..=upper_bound);
-               let mut indexed_iter = indexed.range(lower_bound..=upper_bound);
-               loop {
-                       let b_v = btree_iter.next();
-                       let i_v = indexed_iter.next();
-                       assert_eq!(b_v, i_v);
-                       if b_v.is_none() { break; }
+       for range_type in 0..4 {
+               for k in 0..=255/STRIDE {
+                       let lower_bound = k * STRIDE;
+                       let upper_bound = lower_bound + (STRIDE - 1);
+                       macro_rules! range { ($map: expr) => {
+                               match range_type {
+                                       0 => $map.range(lower_bound..upper_bound),
+                                       1 => $map.range(lower_bound..=upper_bound),
+                                       2 => $map.range(ExclLowerInclUpper(lower_bound, upper_bound)),
+                                       3 => $map.range(ExclLowerExclUpper(lower_bound, upper_bound)),
+                                       _ => unreachable!(),
+                               }
+                       } }
+                       let mut btree_iter = range!(btree);
+                       let mut indexed_iter = range!(indexed);
+                       loop {
+                               let b_v = btree_iter.next();
+                               let i_v = indexed_iter.next();
+                               assert_eq!(b_v, i_v);
+                               if b_v.is_none() { break; }
+                       }
                }
        }
 
@@ -91,7 +115,7 @@ pub fn do_test(data: &[u8]) {
                let prev_value_i = indexed.insert(tuple[0], tuple[1]);
                assert_eq!(prev_value_b, prev_value_i);
        }
-       check_eq(&btree, &indexed);
+       check_eq(&btree, indexed.clone());
 
        // Now, modify the maps in all the ways we have to do so, checking that the maps remain
        // equivalent as we go.
@@ -99,7 +123,7 @@ pub fn do_test(data: &[u8]) {
                *v = *k;
                *btree.get_mut(k).unwrap() = *k;
        }
-       check_eq(&btree, &indexed);
+       check_eq(&btree, indexed.clone());
 
        for k in 0..=255 {
                match btree.entry(k) {
@@ -124,7 +148,7 @@ pub fn do_test(data: &[u8]) {
                        },
                }
        }
-       check_eq(&btree, &indexed);
+       check_eq(&btree, indexed);
 }
 
 pub fn indexedmap_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
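
The ExclLowerInclUpper/ExclLowerExclUpper helpers added above exist because Rust's range literal syntax has no form for an exclusive lower bound, so exercising IndexedMap::range with all four bound combinations requires custom RangeBounds implementations. As a side note, a standalone sketch (not part of the commit) showing the equivalent tuple-of-Bound form that the standard library's range methods also accept:

        use std::collections::BTreeMap;
        use std::ops::Bound;

        fn main() {
                let map: BTreeMap<u8, u8> = (0u8..10).map(|k| (k, k)).collect();
                // A (Bound, Bound) tuple implements RangeBounds, so an exclusive lower
                // bound can be expressed without defining a dedicated struct:
                let excl_lower: Vec<_> = map.range((Bound::Excluded(3u8), Bound::Included(6u8))).collect();
                assert_eq!(excl_lower, vec![(&4, &4), (&5, &5), (&6, &6)]);
        }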
lightning-background-processor/src/lib.rs
index a21f00f867cb36c1dc4d5c596f29dc2af551c5de..97d0462eb5010980850c64b53dc37a07c512f476 100644 (file)
@@ -963,8 +963,8 @@ mod tests {
 
                for i in 0..num_nodes {
                        for j in (i+1)..num_nodes {
-                               nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }).unwrap();
-                               nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }).unwrap();
+                               nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
+                               nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
                        }
                }
 
lightning-invoice/src/utils.rs
index f6cc87fa72af2d0645b3b1538199b28f4265d5a3..868f0b297b1162da0551c8923b4ced23ab880ccb 100644 (file)
@@ -842,13 +842,13 @@ mod test {
 
                // With only one sufficient-value peer connected we should only get its hint
                scid_aliases.remove(&chan_b.0.short_channel_id_alias.unwrap());
-               nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
                match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases.clone());
 
                // If we don't have any sufficient-value peers connected we should get all hints with
                // sufficient value, even though there is a connected insufficient-value peer.
                scid_aliases.insert(chan_b.0.short_channel_id_alias.unwrap());
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                match_invoice_routes(Some(1_000_000_000), &nodes[0], scid_aliases);
        }
 
lightning-net-tokio/src/lib.rs
index b259f77eff8f9ace67fec3b2a666122b0ae3c12a..81e6157c656f6e39e76525e33ca09e1b55870325 100644 (file)
@@ -617,7 +617,7 @@ mod tests {
                fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
                fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
                fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> { None }
-               fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> { Ok(()) }
+               fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
                fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
                fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
                fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -643,13 +643,13 @@ mod tests {
                fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &UpdateFee) {}
                fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &AnnouncementSignatures) {}
                fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &ChannelUpdate) {}
-               fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+               fn peer_disconnected(&self, their_node_id: &PublicKey) {
                        if *their_node_id == self.expected_pubkey {
                                self.disconnected_flag.store(true, Ordering::SeqCst);
                                self.pubkey_disconnected.clone().try_send(()).unwrap();
                        }
                }
-               fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init) -> Result<(), ()> {
+               fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &Init, _inbound: bool) -> Result<(), ()> {
                        if *their_node_id == self.expected_pubkey {
                                self.pubkey_connected.clone().try_send(()).unwrap();
                        }
lightning/src/chain/chainmonitor.rs
index 3a2077209c4ea36ebf3b412a048f845e1b344c1f..9a74f891b1e985b371f2640ccf23f7c992e626ec 100644 (file)
@@ -796,7 +796,7 @@ mod tests {
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::ChannelMessageHandler;
        use crate::util::errors::APIError;
-       use crate::util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
+       use crate::util::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
 
        #[test]
        fn test_async_ooo_offchain_updates() {
@@ -819,10 +819,8 @@ mod tests {
 
                nodes[1].node.claim_funds(payment_preimage_1);
                check_added_monitors!(nodes[1], 1);
-               expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
                nodes[1].node.claim_funds(payment_preimage_2);
                check_added_monitors!(nodes[1], 1);
-               expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
                let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
                assert_eq!(persistences.len(), 1);
@@ -850,8 +848,24 @@ mod tests {
                        .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
                assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
                nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
 
+               let claim_events = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(claim_events.len(), 2);
+               match claim_events[0] {
+                       Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+                               assert_eq!(payment_hash_1, *payment_hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match claim_events[1] {
+                       Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+                               assert_eq!(payment_hash_2, *payment_hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
                // Now manually walk the commitment signed dance - because we claimed two payments
                // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
 
lightning/src/ln/chanmon_update_fail_tests.rs
index fdb66cc4dc8224d6c95b4abd57bdeaf24983509a..1532884fa605c5ba363720aa6701b602c83d92a2 100644 (file)
@@ -143,7 +143,7 @@ fn test_monitor_and_persister_update_fail() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-               if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Check that even though the persister is returning a InProgress,
                        // because the update is bogus, ultimately the error that's returned
                        // should be a PermanentFailure.
@@ -181,8 +181,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        assert_eq!(nodes[0].node.list_channels().len(), 1);
 
        if disconnect {
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
                reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
@@ -234,8 +234,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        assert_eq!(nodes[0].node.list_channels().len(), 1);
 
        if disconnect {
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
                reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
@@ -337,8 +337,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        };
 
        if disconnect_count & !disconnect_flags > 0 {
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        }
 
        // Now fix monitor updating...
@@ -348,13 +348,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        check_added_monitors!(nodes[0], 0);
 
        macro_rules! disconnect_reconnect_peers { () => { {
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -373,10 +373,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -1110,8 +1110,8 @@ fn test_monitor_update_fail_reestablish() {
 
        let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        nodes[2].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[2], 1);
@@ -1130,8 +1130,8 @@ fn test_monitor_update_fail_reestablish() {
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1146,11 +1146,11 @@ fn test_monitor_update_fail_reestablish() {
        nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
        check_added_monitors!(nodes[1], 1);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
        assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
@@ -1315,15 +1315,15 @@ fn claim_while_disconnected_monitor_update_fail() {
        // Forward a payment for B to claim
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1451,11 +1451,11 @@ fn monitor_failed_no_reestablish_response() {
 
        // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
        // is still failing to update monitors.
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
        let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1602,7 +1602,6 @@ fn test_monitor_update_fail_claim() {
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
-       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
@@ -1628,6 +1627,7 @@ fn test_monitor_update_fail_claim() {
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 
        let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
        nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
@@ -1645,6 +1645,7 @@ fn test_monitor_update_fail_claim() {
        let channel_id = chan_1.2;
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 0);
 
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1653,7 +1654,7 @@ fn test_monitor_update_fail_claim() {
        expect_payment_sent!(nodes[0], payment_preimage_1);
 
        // Get the payment forwards, note that they were batched into one commitment update.
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       nodes[1].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[1], 1);
        let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
@@ -1739,7 +1740,6 @@ fn test_monitor_update_on_pending_forwards() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1753,17 +1753,17 @@ fn test_monitor_update_on_pending_forwards() {
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 3);
-       if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[0] {
+       if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
                assert_eq!(payment_hash, payment_hash_1);
                assert!(payment_failed_permanently);
        } else { panic!("Unexpected event!"); }
-       match events[1] {
+       match events[2] {
                Event::PaymentFailed { payment_hash, .. } => {
                        assert_eq!(payment_hash, payment_hash_1);
                },
                _ => panic!("Unexpected event"),
        }
-       match events[2] {
+       match events[0] {
                Event::PendingHTLCsForwardable { .. } => { },
                _ => panic!("Unexpected event"),
        };
@@ -1803,7 +1803,6 @@ fn monitor_update_claim_fail_no_response() {
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
-       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1811,6 +1810,7 @@ fn monitor_update_claim_fail_no_response() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -1879,8 +1879,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        }
 
        // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2047,12 +2047,12 @@ fn test_pending_update_fee_ack_on_reconnect() {
        let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
        // bs_first_raa is not delivered until it is re-generated after reconnect
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2175,12 +2175,12 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2290,7 +2290,6 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.claim_funds(payment_preimage_0);
        check_added_monitors!(nodes[0], 1);
-       expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
@@ -2309,15 +2308,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
                        reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
                } else {
-                       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+                       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                }
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
                // Now reconnect the two
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -2353,6 +2352,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
+       expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
        // New outbound messages should be generated immediately upon a call to
        // get_and_clear_pending_msg_events (but not before).
@@ -2499,8 +2499,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        }
 
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        if second_fails {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
@@ -2606,7 +2606,15 @@ fn test_permanent_error_during_sending_shutdown() {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
-       check_closed_broadcast!(nodes[0], true);
+
+       // We always send the `shutdown` response when initiating a shutdown, even if we immediately
+       // close the channel thereafter.
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 3);
+       if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+       if let MessageSendEvent::HandleError { .. } =  msg_events[2] {} else { panic!(); }
+
        check_added_monitors!(nodes[0], 2);
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
@@ -2629,7 +2637,15 @@ fn test_permanent_error_during_handling_shutdown() {
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
-       check_closed_broadcast!(nodes[1], true);
+
+       // We always send the `shutdown` response when receiving a shutdown, even if we immediately
+       // close the channel thereafter.
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 3);
+       if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+       if let MessageSendEvent::HandleError { .. } =  msg_events[2] {} else { panic!(); }
+
        check_added_monitors!(nodes[1], 2);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
@@ -2651,7 +2667,6 @@ fn double_temp_error() {
        // `claim_funds` results in a ChannelMonitorUpdate.
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
-       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -2659,7 +2674,6 @@ fn double_temp_error() {
        // which had some asserts that prevented it from being called twice.
        nodes[1].node.claim_funds(payment_preimage_2);
        check_added_monitors!(nodes[1], 1);
-       expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -2668,11 +2682,24 @@ fn double_temp_error() {
        check_added_monitors!(nodes[1], 0);
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
 
-       // Complete the first HTLC.
-       let events = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
+       // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
+       // and get both PaymentClaimed events at once.
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+               _ => panic!("Unexpected Event: {:?}", events[0]),
+       }
+       match events[1] {
+               Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+               _ => panic!("Unexpected Event: {:?}", events[1]),
+       }
+
+       assert_eq!(msg_events.len(), 1);
        let (update_fulfill_1, commitment_signed_b1, node_id) = {
-               match &events[0] {
+               match &msg_events[0] {
                        &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                                assert!(update_add_htlcs.is_empty());
                                assert_eq!(update_fulfill_htlcs.len(), 1);
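
A pattern worth noting across this file's changes: expect_payment_claimed! moves from immediately after claim_funds to after the corresponding monitor update is marked complete, reflecting that the PaymentClaimed event now surfaces only once the ChannelMonitorUpdate finishes. A condensed ordering sketch recombined from the hunks above (hypothetical, for illustration only; names such as payment_preimage_1 and channel_id are reused from the tests):

        // Claiming only queues a monitor update while the persister reports InProgress...
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // no PaymentClaimed yet

        // ...and the PaymentClaimed event only appears once the update completes.
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id
                .lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);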
lightning/src/ln/channel.rs
index 62a2b7b51e52b723231fec7bec63f088c4c793a2..e3641a6204a96d6685098a86ba6fc9d67309e15d 100644 (file)
@@ -393,35 +393,21 @@ enum UpdateFulfillFetch {
 }
 
 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch {
+pub enum UpdateFulfillCommitFetch<'a> {
        /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
        /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
        /// previously placed in the holding cell (and has since been removed).
        NewClaim {
                /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-               monitor_update: ChannelMonitorUpdate,
+               monitor_update: &'a ChannelMonitorUpdate,
                /// The value of the HTLC which was claimed, in msat.
                htlc_value_msat: u64,
-               /// The update_fulfill message and commitment_signed message (if the claim was not placed
-               /// in the holding cell).
-               msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
        },
        /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
        /// or has been forgotten (presumably previously claimed).
        DuplicateClaim {},
 }
 
-/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC
-/// state.
-pub(super) struct RAAUpdates {
-       pub commitment_update: Option<msgs::CommitmentUpdate>,
-       pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
-       pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
-       pub finalized_claimed_htlcs: Vec<HTLCSource>,
-       pub monitor_update: ChannelMonitorUpdate,
-       pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
-}
-
 /// The return value of `monitor_updating_restored`
 pub(super) struct MonitorRestoreUpdates {
        pub raa: Option<msgs::RevokeAndACK>,
@@ -558,6 +544,11 @@ pub(super) struct Channel<Signer: ChannelSigner> {
        monitor_pending_channel_ready: bool,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,
+
+       // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
+       // responsible for some of the HTLCs here or not - we don't know whether the update in question
+       // completed or not. We currently ignore these fields entirely when force-closing a channel,
+       // but need to handle this somehow or we run the risk of losing HTLCs!
        monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        monitor_pending_finalized_fulfills: Vec<HTLCSource>,
@@ -743,6 +734,12 @@ pub(super) struct Channel<Signer: ChannelSigner> {
        /// The unique identifier used to re-derive the private key material for the channel through
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],
+
+       /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
+       /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
+       /// completes we still need to be able to complete the persistence. Thus, we have to keep a
+       /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
+       pending_monitor_updates: Vec<ChannelMonitorUpdate>,
 }
 
 #[cfg(any(test, fuzzing))]
@@ -1112,6 +1109,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                        channel_type,
                        channel_keys_id,
+
+                       pending_monitor_updates: Vec::new(),
                })
        }
 
@@ -1458,6 +1457,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                        channel_type,
                        channel_keys_id,
+
+                       pending_monitor_updates: Vec::new(),
                };
 
                Ok(chan)
@@ -1964,22 +1965,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
        }
 
-       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
-                               let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
-                                       Err(e) => return Err((e, monitor_update)),
-                                       Ok(res) => res
-                               };
-                               // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
-                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
+                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                               self.pending_monitor_updates.push(monitor_update);
+                               UpdateFulfillCommitFetch::NewClaim {
+                                       monitor_update: self.pending_monitor_updates.last().unwrap(),
+                                       htlc_value_msat,
+                               }
                        },
-                       UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
-                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
-                       UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
+                       UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
+                               self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                               self.pending_monitor_updates.push(monitor_update);
+                               UpdateFulfillCommitFetch::NewClaim {
+                                       monitor_update: self.pending_monitor_updates.last().unwrap(),
+                                       htlc_value_msat,
+                               }
+                       }
+                       UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
                }
        }
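
Several hunks in this file repeat the comment that monitor update ids must stay "strictly increasing by one": when a claim update and the follow-on commitment update are merged, the id bumped by `build_commitment_no_status_check` is rolled back to the first update's id. A tiny self-contained sketch of that bookkeeping, using placeholder types rather than the real `ChannelMonitorUpdate`:

// Simplified stand-ins for the id bookkeeping; not the actual ChannelMonitorUpdate.
struct Update { update_id: u64, steps: Vec<&'static str> }

// `latest_id` plays the role of `self.latest_monitor_update_id`, which was bumped
// when `second` was built.
fn merge(mut first: Update, mut second: Update, latest_id: &mut u64) -> Update {
    // Roll the counter back to the first update's id so delivered update ids stay
    // strictly increasing by exactly one.
    *latest_id = first.update_id;
    first.steps.append(&mut second.steps);
    first
}

fn main() {
    let mut latest_id = 2;
    let merged = merge(
        Update { update_id: 1, steps: vec!["PaymentPreimage"] },
        Update { update_id: 2, steps: vec!["LatestCounterpartyCommitmentTXInfo"] },
        &mut latest_id,
    );
    assert_eq!(latest_id, 1);
    assert_eq!(merged.update_id, 1);
    assert_eq!(merged.steps.len(), 2);
}
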
 
@@ -2259,9 +2268,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn funding_created<SP: Deref, L: Deref>(
                &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
+       ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
        where
-               SP::Target: SignerProvider,
+               SP::Target: SignerProvider<Signer = Signer>,
                L::Target: Logger
        {
                if self.is_outbound() {
@@ -2337,19 +2346,22 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
 
+               let need_channel_ready = self.check_get_channel_ready(0).is_some();
+               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
-               }, channel_monitor, self.check_get_channel_ready(0)))
+               }, channel_monitor))
        }
 
        /// Handles a funding_signed message from the remote end.
        /// If this call is successful, broadcast the funding transaction (and not before!)
        pub fn funding_signed<SP: Deref, L: Deref>(
                &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError>
+       ) -> Result<ChannelMonitor<Signer>, ChannelError>
        where
-               SP::Target: SignerProvider,
+               SP::Target: SignerProvider<Signer = Signer>,
                L::Target: Logger
        {
                if !self.is_outbound() {
@@ -2422,7 +2434,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
 
-               Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
+               let need_channel_ready = self.check_get_channel_ready(0).is_some();
+               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+               Ok(channel_monitor)
        }
 
        /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
@@ -3034,17 +3048,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(())
        }
 
-       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
+       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
                where L::Target: Logger
        {
                if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
+                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())));
+                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
                }
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
-                       return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())));
+                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
 
                let funding_script = self.get_funding_redeemscript();
@@ -3062,7 +3076,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
                                log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
                        if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
-                               return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
+                               return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
                        }
                        bitcoin_tx.txid
                };
@@ -3077,7 +3091,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        debug_assert!(!self.is_outbound());
                        let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000;
                        if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
-                               return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
+                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
                        }
                }
                #[cfg(any(test, fuzzing))]
@@ -3099,7 +3113,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
 
                if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
-                       return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))));
+                       return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
                }
 
                // TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update
@@ -3117,7 +3131,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
                                        encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
                                if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
-                                       return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
+                                       return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
                                }
                                htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
                        } else {
@@ -3133,10 +3147,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.counterparty_funding_pubkey()
                );
 
-               let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
                self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
-                       .map_err(|_| (None, ChannelError::Close("Failed to validate our commitment".to_owned())))?;
-               let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
+                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
 
                // Update state now that we've passed all the can-fail calls...
                let mut need_commitment = false;
@@ -3181,7 +3193,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                self.cur_holder_commitment_transaction_number -= 1;
                // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
-               // send_commitment_no_status_check() next which will reset this to RAAFirst.
+               // build_commitment_no_status_check() next which will reset this to RAAFirst.
                self.resend_order = RAACommitmentOrder::CommitmentFirst;
 
                if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
@@ -3193,52 +3205,50 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // the corresponding HTLC status updates so that get_last_commitment_update
                                // includes the right HTLCs.
                                self.monitor_pending_commitment_signed = true;
-                               let (_, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
-                               // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
                        }
                        log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
                                log_bytes!(self.channel_id));
-                       return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
+                       self.pending_monitor_updates.push(monitor_update);
+                       return Ok(self.pending_monitor_updates.last().unwrap());
                }
 
-               let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+               let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                        // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
                        // we'll send one right away when we get the revoke_and_ack when we
                        // free_holding_cell_htlcs().
-                       let (msg, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
-                       // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                        // strictly increasing by one, so decrement it here.
                        self.latest_monitor_update_id = monitor_update.update_id;
                        monitor_update.updates.append(&mut additional_update.updates);
-                       Some(msg)
-               } else { None };
+                       true
+               } else { false };
 
                log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-                       log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
-
-               Ok((msgs::RevokeAndACK {
-                       channel_id: self.channel_id,
-                       per_commitment_secret,
-                       next_per_commitment_point,
-               }, commitment_signed, monitor_update))
+                       log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+               self.pending_monitor_updates.push(monitor_update);
+               self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+               return Ok(self.pending_monitor_updates.last().unwrap());
        }
 
        /// Public version of the below, checking relevant preconditions first.
        /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
        /// returns `(None, Vec::new())`.
-       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
                if self.channel_state >= ChannelState::ChannelReady as u32 &&
                   (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
                        self.free_holding_cell_htlcs(logger)
-               } else { Ok((None, Vec::new())) }
+               } else { (None, Vec::new()) }
        }
 
        /// Frees any pending commitment updates in the holding cell, generating the relevant messages
        /// for our counterparty.
-       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
@@ -3319,7 +3329,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }
                        }
                        if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
-                               return Ok((None, htlcs_to_fail));
+                               return (None, htlcs_to_fail);
                        }
                        let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() {
                                self.send_update_fee(feerate, false, logger)
@@ -3327,8 +3337,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                None
                        };
 
-                       let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
-                       // send_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
+                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                       // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
                        // but we want them to be strictly increasing by one, so reset it here.
                        self.latest_monitor_update_id = monitor_update.update_id;
                        monitor_update.updates.append(&mut additional_update.updates);
@@ -3337,16 +3347,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
 
-                       Ok((Some((msgs::CommitmentUpdate {
-                               update_add_htlcs,
-                               update_fulfill_htlcs,
-                               update_fail_htlcs,
-                               update_fail_malformed_htlcs: Vec::new(),
-                               update_fee,
-                               commitment_signed,
-                       }, monitor_update)), htlcs_to_fail))
+                       self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                       self.pending_monitor_updates.push(monitor_update);
+                       (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
                } else {
-                       Ok((None, Vec::new()))
+                       (None, Vec::new())
                }
        }
 
@@ -3355,7 +3360,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<RAAUpdates, ChannelError>
+       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
                where L::Target: Logger,
        {
                if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3542,8 +3547,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // When the monitor updating is restored we'll call get_last_commitment_update(),
                                // which does not update state, but we're definitely now awaiting a remote revoke
                                // before we can step forward any more, so set it here.
-                               let (_, mut additional_update) = self.send_commitment_no_status_check(logger)?;
-                               // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
@@ -3552,71 +3557,41 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
                        self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
-                       return Ok(RAAUpdates {
-                               commitment_update: None, finalized_claimed_htlcs: Vec::new(),
-                               accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(),
-                               monitor_update,
-                               holding_cell_failed_htlcs: Vec::new()
-                       });
+                       self.pending_monitor_updates.push(monitor_update);
+                       return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
                }
 
-               match self.free_holding_cell_htlcs(logger)? {
-                       (Some((mut commitment_update, mut additional_update)), htlcs_to_fail) => {
-                               commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
-                               for fail_msg in update_fail_htlcs.drain(..) {
-                                       commitment_update.update_fail_htlcs.push(fail_msg);
-                               }
-                               commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
-                               for fail_msg in update_fail_malformed_htlcs.drain(..) {
-                                       commitment_update.update_fail_malformed_htlcs.push(fail_msg);
-                               }
-
+               match self.free_holding_cell_htlcs(logger) {
+                       (Some(_), htlcs_to_fail) => {
+                               let mut additional_update = self.pending_monitor_updates.pop().unwrap();
                                // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
 
-                               Ok(RAAUpdates {
-                                       commitment_update: Some(commitment_update),
-                                       finalized_claimed_htlcs,
-                                       accepted_htlcs: to_forward_infos,
-                                       failed_htlcs: revoked_htlcs,
-                                       monitor_update,
-                                       holding_cell_failed_htlcs: htlcs_to_fail
-                               })
+                               self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                               self.pending_monitor_updates.push(monitor_update);
+                               Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
                        },
                        (None, htlcs_to_fail) => {
                                if require_commitment {
-                                       let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+                                       let mut additional_update = self.build_commitment_no_status_check(logger);
 
-                                       // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                        // strictly increasing by one, so decrement it here.
                                        self.latest_monitor_update_id = monitor_update.update_id;
                                        monitor_update.updates.append(&mut additional_update.updates);
 
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
                                                log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
-                                       Ok(RAAUpdates {
-                                               commitment_update: Some(msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs,
-                                                       update_fail_malformed_htlcs,
-                                                       update_fee: None,
-                                                       commitment_signed
-                                               }),
-                                               finalized_claimed_htlcs,
-                                               accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
-                                               monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
-                                       })
+                                       self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                                       self.pending_monitor_updates.push(monitor_update);
+                                       Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
                                } else {
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
-                                       Ok(RAAUpdates {
-                                               commitment_update: None,
-                                               finalized_claimed_htlcs,
-                                               accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
-                                               monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
-                                       })
+                                       self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                                       self.pending_monitor_updates.push(monitor_update);
+                                       Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
                                }
                        }
                }
@@ -3768,15 +3743,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
-       /// This must be called immediately after the [`chain::Watch`] call which returned
-       /// [`ChannelMonitorUpdateStatus::InProgress`].
+       /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+       /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+       /// update completes (potentially immediately).
        /// The messages which were generated with the monitor update must *not* have been sent to the
        /// remote end, and must instead have been dropped. They will be regenerated when
        /// [`Self::monitor_updating_restored`] is called.
        ///
+       /// [`ChannelManager`]: super::channelmanager::ChannelManager
        /// [`chain::Watch`]: crate::chain::Watch
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
-       pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+       fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
                resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
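
The updated `monitor_updating_paused` docs describe the intended discipline: the channel marks itself paused when it hands a `ChannelMonitorUpdate` back to the manager, suppresses the corresponding messages, and regenerates them from `monitor_updating_restored` once persistence completes. A minimal stand-in sketch of that flag tracking, with made-up names (`MonitorUpdateGate`, `pause`, `restore`) rather than the real channel state machine:

// A stand-in sketch (not the ChannelManager API) of the pause/restore discipline:
// remember which messages were suppressed when the update was handed out, and
// regenerate exactly those when persistence completes.
#[derive(Default)]
struct MonitorUpdateGate {
    paused: bool,
    resend_raa: bool,
    resend_commitment: bool,
}

impl MonitorUpdateGate {
    // Called when a ChannelMonitorUpdate is handed back to the manager instead of
    // sending the corresponding messages to the peer.
    fn pause(&mut self, resend_raa: bool, resend_commitment: bool) {
        self.paused = true;
        self.resend_raa |= resend_raa;
        self.resend_commitment |= resend_commitment;
    }

    // Called once the update is persisted (possibly immediately): returns which
    // messages now need to be regenerated and sent.
    fn restore(&mut self) -> (bool, bool) {
        assert!(self.paused, "restore without a pending update");
        self.paused = false;
        (core::mem::take(&mut self.resend_raa), core::mem::take(&mut self.resend_commitment))
    }
}

fn main() {
    let mut gate = MonitorUpdateGate::default();
    gate.pause(true, false);
    assert_eq!(gate.restore(), (true, false));
}
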
@@ -3803,6 +3780,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+               self.pending_monitor_updates.clear();
 
                // If we're past (or at) the FundingSent stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
@@ -4280,7 +4258,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn shutdown<SP: Deref>(
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-       ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+       ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        where SP::Target: SignerProvider
        {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4336,12 +4314,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let monitor_update = if update_shutdown_script {
                        self.latest_monitor_update_id += 1;
-                       Some(ChannelMonitorUpdate {
+                       let monitor_update = ChannelMonitorUpdate {
                                update_id: self.latest_monitor_update_id,
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
-                       })
+                       };
+                       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                       self.pending_monitor_updates.push(monitor_update);
+                       Some(self.pending_monitor_updates.last().unwrap())
                } else { None };
                let shutdown = if send_shutdown {
                        Some(msgs::Shutdown {
@@ -4891,6 +4872,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
        }
 
+       pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> {
+               self.pending_monitor_updates.first()
+       }
+
        /// Returns true if funding_created was sent/received.
        pub fn is_funding_initiated(&self) -> bool {
                self.channel_state >= ChannelState::FundingSent as u32
@@ -5791,8 +5776,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(Some(res))
        }
 
-       /// Only fails in case of bad keys
-       fn send_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
+       fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
                log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
                // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
                // fail to generate this, we still are at least at a position where upgrading their status
@@ -5825,15 +5809,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
                self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
 
-               let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) {
-                       Ok((res, (counterparty_commitment_tx, mut htlcs))) => {
-                               // Update state now that we've passed all the can-fail calls...
-                               let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
-                                       htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
-                               (res, counterparty_commitment_tx, htlcs_no_ref)
-                       },
-                       Err(e) => return Err(e),
-               };
+               let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
+               let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+                       htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
 
                if self.announcement_sigs_state == AnnouncementSigsState::MessageSent {
                        self.announcement_sigs_state = AnnouncementSigsState::Committed;
@@ -5850,16 +5828,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }]
                };
                self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
-               Ok((res, monitor_update))
+               monitor_update
        }
 
-       /// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
-       /// when we shouldn't change HTLC/channel state.
-       fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+       fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
                let counterparty_keys = self.build_remote_transaction_keys();
                let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
                let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
-               let (signature, htlc_signatures);
 
                #[cfg(any(test, fuzzing))]
                {
@@ -5879,6 +5854,21 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                }
 
+               (counterparty_commitment_txid, commitment_stats.htlcs_included)
+       }
+
+       /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+       /// generation when we shouldn't change HTLC/channel state.
+       fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+               // Get the fee tests from `build_commitment_no_state_update`
+               #[cfg(any(test, fuzzing))]
+               self.build_commitment_no_state_update(logger);
+
+               let counterparty_keys = self.build_remote_transaction_keys();
+               let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+               let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+               let (signature, htlc_signatures);
+
                {
                        let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
                        for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
@@ -5911,16 +5901,20 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
        }
 
-       /// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
-       /// to send to the remote peer in one go.
+       /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
+       /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
        ///
        /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
-       /// [`Self::send_htlc`] and [`Self::send_commitment_no_state_update`] for more info.
-       pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> where L::Target: Logger {
-               match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger)? {
-                       Some(update_add_htlc) => {
-                               let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?;
-                               Ok(Some((update_add_htlc, commitment_signed, monitor_update)))
+       /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
+       pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+               let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+               if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+               match send_res? {
+                       Some(_) => {
+                               let monitor_update = self.build_commitment_no_status_check(logger);
+                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                               self.pending_monitor_updates.push(monitor_update);
+                               Ok(Some(self.pending_monitor_updates.last().unwrap()))
                        },
                        None => Ok(None)
                }
@@ -5946,8 +5940,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        /// Begins the shutdown process, getting a message for the remote peer and returning all
        /// holding cell HTLCs for payment failure.
-       pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
-       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+       ///
+       /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
+       /// [`ChannelMonitorUpdate`] will be returned.
+       pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+               target_feerate_sats_per_kw: Option<u32>)
+       -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        where SP::Target: SignerProvider {
                for htlc in self.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -5967,9 +5965,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
                }
 
+               // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+               // script is set, we just force-close and call it a day.
+               let mut chan_closed = false;
+               if self.channel_state < ChannelState::FundingSent as u32 {
+                       chan_closed = true;
+               }
+
                let update_shutdown_script = match self.shutdown_scriptpubkey {
                        Some(_) => false,
-                       None => {
+                       None if !chan_closed => {
                                let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
                                if !shutdown_scriptpubkey.is_compatible(their_features) {
                                        return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
@@ -5977,6 +5982,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
                        },
+                       None => false,
                };
 
                // From here on out, we may not fail!
@@ -5990,12 +5996,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let monitor_update = if update_shutdown_script {
                        self.latest_monitor_update_id += 1;
-                       Some(ChannelMonitorUpdate {
+                       let monitor_update = ChannelMonitorUpdate {
                                update_id: self.latest_monitor_update_id,
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
-                       })
+                       };
+                       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                       self.pending_monitor_updates.push(monitor_update);
+                       Some(self.pending_monitor_updates.last().unwrap())
                } else { None };
                let shutdown = msgs::Shutdown {
                        channel_id: self.channel_id,
@@ -6016,6 +6025,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                });
 
+               debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+                       "we can't both complete shutdown and return a monitor update");
+
                Ok((shutdown, monitor_update, dropped_outbound_htlcs))
        }
 
@@ -6877,6 +6889,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                        channel_type: channel_type.unwrap(),
                        channel_keys_id,
+
+                       pending_monitor_updates: Vec::new(),
                })
        }
 }
@@ -7188,7 +7202,7 @@ mod tests {
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-               let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
 
                // Node B --> Node A: funding signed
                let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
index 3fd472925cf6948ec56a88986ed97a1368e99963..577e0984448a257a2ff9a23f305aa2684ef968dc 100644 (file)
@@ -65,6 +65,8 @@ use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, Maybe
 use crate::util::logger::{Level, Logger};
 use crate::util::errors::APIError;
 
+use alloc::collections::BTreeMap;
+
 use crate::io;
 use crate::prelude::*;
 use core::{cmp, mem};
@@ -343,17 +345,6 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn ignore_no_close(err: String) -> Self {
-               Self {
-                       err: LightningError {
-                               err,
-                               action: msgs::ErrorAction::IgnoreError,
-                       },
-                       chan_id: None,
-                       shutdown_finish: None,
-               }
-       }
-       #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
                Self { err, chan_id: None, shutdown_finish: None }
        }
@@ -464,6 +455,7 @@ enum BackgroundEvent {
        ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
 }
 
+#[derive(Debug)]
 pub(crate) enum MonitorUpdateCompletionAction {
        /// Indicates that a payment ultimately destined for us was claimed and we should emit an
        /// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
@@ -474,6 +466,11 @@ pub(crate) enum MonitorUpdateCompletionAction {
        EmitEvent { event: events::Event },
 }
 
+impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
+       (0, PaymentClaimed) => { (0, payment_hash, required) },
+       (2, EmitEvent) => { (0, event, ignorable) },
+);
+
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
        /// `temporary_channel_id` or `channel_id` -> `channel`.
@@ -487,6 +484,21 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
        /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
+       /// Map from a specific channel to some action(s) that should be taken when all pending
+       /// [`ChannelMonitorUpdate`]s for the channel complete updating.
+       ///
+       /// Note that because we generally only have one entry here, a HashMap is pretty overkill. A
+       /// BTreeMap currently stores more than ten elements per leaf node, so even with up to a few
+       /// channels with a peer this will just be one allocation and will amount to a linear list of
+       /// channels to walk, avoiding the whole hashing rigmarole.
+       ///
+       /// Note that the channel may no longer exist. For example, if a channel was closed but we
+       /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
+       /// for a missing channel. While a malicious peer could construct a second channel with the
+       /// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
+       /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
+       /// duplicates do not occur, so such channels should fail without a monitor update completing.
+       monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
        /// [`ChannelMessageHandler::peer_disconnected`]).
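
The new `monitor_update_blocked_actions` map queues per-channel completion actions while `ChannelMonitorUpdate`s are in flight. A small self-contained sketch of that queue-then-drain usage of a `BTreeMap` keyed by a 32-byte channel id, with a placeholder `Action` enum standing in for `MonitorUpdateCompletionAction`:

use std::collections::BTreeMap;

// `Action` is a placeholder for MonitorUpdateCompletionAction.
#[derive(Debug)]
enum Action { EmitEvent(&'static str) }

fn main() {
    let mut blocked: BTreeMap<[u8; 32], Vec<Action>> = BTreeMap::new();
    let channel_id = [0u8; 32];

    // While a ChannelMonitorUpdate for this channel is still in flight, queue the action.
    blocked.entry(channel_id).or_insert_with(Vec::new).push(Action::EmitEvent("PaymentClaimed"));

    // Once all pending updates for the channel complete, take and run the queued actions.
    for action in blocked.remove(&channel_id).unwrap_or_default() {
        println!("running blocked action: {:?}", action);
    }
}
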
@@ -501,7 +513,7 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
                if require_disconnected && self.is_connected {
                        return false
                }
-               self.channel_by_id.len() == 0
+               self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
        }
 }
 
@@ -593,6 +605,15 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C
 /// offline for a full minute. In order to track this, you must call
 /// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
 ///
+/// To avoid trivial DoS issues, ChannelManager limits the number of inbound connections and
+/// inbound channels without confirmed funding transactions. This may result in nodes which we do
+/// not have a channel with being unable to connect to us or open new channels with us if we have
+/// many peers with unfunded channels.
+///
+/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
+/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
+/// never limited. Please ensure you limit the count of such channels yourself.
+///
 /// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
 /// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
 /// essentially you should default to using a SimpleRefChannelManager, and use a
@@ -943,6 +964,19 @@ pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
 /// [`OutboundPayments::remove_stale_resolved_payments`].
 pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
 
+/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
+/// (inbound) ones. The number of peers with unfunded channels is limited separately in
+/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
+const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
+
+/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
+/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
+const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
+
+/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
+/// many peers we reject new (inbound) connections.
+const MAX_NO_CHANNEL_PEERS: usize = 250;
+
 /// Information needed for constructing an invoice route hint for this channel.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
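
The constants above cap unfunded inbound channels per peer, peers with unfunded channels, and peers with no funded channels at all. As a sketch of how such limits could gate inbound open_channel messages and connections, here is an illustration with hypothetical counting helpers; the actual checks live elsewhere in the `ChannelManager` changes and are not shown in this excerpt:

// Illustration only: the counting struct and accept/reject helpers below are
// assumptions for the sketch, not the enforcement code added by this patch.
const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
const MAX_NO_CHANNEL_PEERS: usize = 250;

struct PeerCounts {
    unfunded_channels_with_peer: usize,
    peers_with_unfunded_channels: usize,
    peers_without_funded_channels: usize,
}

// New inbound (non-0conf) channels are refused once either unfunded-channel limit is hit.
fn accept_inbound_channel(c: &PeerCounts) -> bool {
    c.unfunded_channels_with_peer < MAX_UNFUNDED_CHANS_PER_PEER
        && c.peers_with_unfunded_channels < MAX_UNFUNDED_CHANNEL_PEERS
}

// New inbound connections are refused once too many peers have no funded channel with us.
fn accept_inbound_connection(c: &PeerCounts) -> bool {
    c.peers_without_funded_channels < MAX_NO_CHANNEL_PEERS
}

fn main() {
    let counts = PeerCounts {
        unfunded_channels_with_peer: 4,
        peers_with_unfunded_channels: 10,
        peers_without_funded_channels: 250,
    };
    assert!(!accept_inbound_channel(&counts));
    assert!(!accept_inbound_connection(&counts));
}
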
@@ -1345,78 +1379,6 @@ macro_rules! remove_channel {
        }
 }
 
-macro_rules! handle_monitor_update_res {
-       ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
-               match $err {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {
-                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
-                               update_maps_on_chan_removal!($self, $chan);
-                               // TODO: $failed_fails is dropped here, which will cause other channels to hit the
-                               // chain in a confused state! We need to move them into the ChannelMonitor which
-                               // will be responsible for failing backwards once things confirm on-chain.
-                               // It's ok that we drop $failed_forwards here - at this point we'd rather they
-                               // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
-                               // us bother trying to claim it just to forward on to another peer. If we're
-                               // splitting hairs we'd prefer to claim payments that were to us, but we haven't
-                               // given up the preimage yet, so might as well just wait until the payment is
-                               // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
-                                               $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
-                               (res, true)
-                       },
-                       ChannelMonitorUpdateStatus::InProgress => {
-                               log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
-                                               log_bytes!($chan_id[..]),
-                                               if $resend_commitment && $resend_raa {
-                                                               match $action_type {
-                                                                       RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
-                                                                       RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
-                                                               }
-                                                       } else if $resend_commitment { "commitment" }
-                                                       else if $resend_raa { "RAA" }
-                                                       else { "nothing" },
-                                               (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
-                                               (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
-                                               (&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
-                               if !$resend_commitment {
-                                       debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
-                               }
-                               if !$resend_raa {
-                                       debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
-                               }
-                               $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
-                               (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
-                       },
-                       ChannelMonitorUpdateStatus::Completed => {
-                               (Ok(()), false)
-                       },
-               }
-       };
-       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
-               if drop {
-                       $entry.remove_entry();
-               }
-               res
-       } };
-       ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
-               debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-               handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-       } };
-       ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-               handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-       };
-       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-               handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
-       };
-       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
-       };
-       ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
-       };
-}
-
 macro_rules! send_channel_ready {
        ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
                $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
@@ -1454,6 +1416,93 @@ macro_rules! emit_channel_ready_event {
        }
 }
 
+macro_rules! handle_monitor_update_completion {
+       ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+               let mut updates = $chan.monitor_updating_restored(&$self.logger,
+                       &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+                       $self.best_block.read().unwrap().height());
+               let counterparty_node_id = $chan.get_counterparty_node_id();
+               let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+                       // We only send a channel_update in the case where we are just now sending a
+                       // channel_ready and the channel is in a usable state. We may re-send a
+                       // channel_update later through the announcement_signatures process for public
+                       // channels, but there's no reason not to just inform our counterparty of our fees
+                       // now.
+                       if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
+                               Some(events::MessageSendEvent::SendChannelUpdate {
+                                       node_id: counterparty_node_id,
+                                       msg,
+                               })
+                       } else { None }
+               } else { None };
+
+               let update_actions = $peer_state.monitor_update_blocked_actions
+                       .remove(&$chan.channel_id()).unwrap_or(Vec::new());
+
+               let htlc_forwards = $self.handle_channel_resumption(
+                       &mut $peer_state.pending_msg_events, $chan, updates.raa,
+                       updates.commitment_update, updates.order, updates.accepted_htlcs,
+                       updates.funding_broadcastable, updates.channel_ready,
+                       updates.announcement_sigs);
+               if let Some(upd) = channel_update {
+                       $peer_state.pending_msg_events.push(upd);
+               }
+
+               let channel_id = $chan.channel_id();
+               core::mem::drop($peer_state_lock);
+
+               $self.handle_monitor_update_completion_actions(update_actions);
+
+               if let Some(forwards) = htlc_forwards {
+                       $self.forward_htlcs(&mut [forwards][..]);
+               }
+               $self.finalize_claims(updates.finalized_claimed_htlcs);
+               for failure in updates.failed_htlcs.drain(..) {
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+                       $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+               }
+       } }
+}
+
+macro_rules! handle_new_monitor_update {
+       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+               // update_maps_on_chan_removal needs to be able to take the id_to_peer lock, so check up
+               // front (in all cases) that it is not already held, or we would deadlock.
+               debug_assert!($self.id_to_peer.try_lock().is_ok());
+               match $update_res {
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+                                       log_bytes!($chan.channel_id()[..]));
+                               Ok(())
+                       },
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
+                                       log_bytes!($chan.channel_id()[..]));
+                               update_maps_on_chan_removal!($self, $chan);
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+                                       "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
+                                       $chan.get_user_id(), $chan.force_shutdown(false),
+                                       $self.get_channel_update_for_broadcast(&$chan).ok()));
+                               $remove;
+                               res
+                       },
+                       ChannelMonitorUpdateStatus::Completed => {
+                               if ($update_id == 0 || $chan.get_next_monitor_update()
+                                       .expect("We can't be processing a monitor update if it isn't queued")
+                                       .update_id == $update_id) &&
+                                       $chan.get_latest_monitor_update_id() == $update_id
+                               {
+                                       handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
+                               }
+                               Ok(())
+                       },
+               }
+       } };
+       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
+               handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+       }
+}
+
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
        M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
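
For orientation, the new `handle_new_monitor_update!` macro replaces the removed `handle_monitor_update_res!` machinery, and the hunks below all adopt the same calling convention. A minimal sketch of that convention (not standalone code; `funding_txo` and `chan_entry` come from the surrounding channel lookup in each caller):

        let update_id = monitor_update.update_id;
        let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
        // Ok(()) when the update completed or is still in flight (outbound messages are
        // held until completion); Err(..) only on ChannelMonitorUpdateStatus::PermanentFailure,
        // in which case the entry has already been removed and the channel shut down.
        handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry)
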
@@ -1768,25 +1817,27 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let their_features = &peer_state.latest_features;
+                                       let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+                                               .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
                                        failed_htlcs = htlcs;
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update {
-                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
-                                               let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                               if is_permanent {
-                                                       remove_channel!(self, chan_entry);
-                                                       break result;
-                                               }
-                                       }
-
+                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                       // here as we don't need the monitor update to complete until we send a
+                                       // `closing_signed`, which we'll delay if we're pending a monitor update.
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                node_id: *counterparty_node_id,
-                                               msg: shutdown_msg
+                                               msg: shutdown_msg,
                                        });
 
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update_opt.take() {
+                                               let update_id = monitor_update.update_id;
+                                               let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+                                       }
+
                                        if chan_entry.get().is_shutdown() {
                                                let channel = remove_channel!(self, chan_entry);
                                                if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
@@ -2390,54 +2441,35 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
-                               match {
-                                       if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
-                                       }
-                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
-                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                                       path: path.clone(),
-                                                       session_priv: session_priv.clone(),
-                                                       first_hop_htlc_msat: htlc_msat,
-                                                       payment_id,
-                                                       payment_secret: payment_secret.clone(),
-                                                       payment_params: payment_params.clone(),
-                                               }, onion_packet, &self.logger),
-                                               chan)
-                               } {
-                                       Some((update_add, commitment_signed, monitor_update)) => {
-                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
-                                               let chan_id = chan.get().channel_id();
-                                               match (update_err,
-                                                       handle_monitor_update_res!(self, update_err, chan,
-                                                               RAACommitmentOrder::CommitmentFirst, false, true))
-                                               {
-                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
-                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
-                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
-                                                               // Note that MonitorUpdateInProgress here indicates (per function
-                                                               // docs) that we will resend the commitment update once monitor
-                                                               // updating completes. Therefore, we must return an error
-                                                               // indicating that it is unsafe to retry the payment wholesale,
-                                                               // which we do in the send_payment check for
-                                                               // MonitorUpdateInProgress, below.
-                                                               return Err(APIError::MonitorUpdateInProgress);
-                                                       },
-                                                       _ => unreachable!(),
+                               if !chan.get().is_live() {
+                                       return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
+                               }
+                               let funding_txo = chan.get().get_funding_txo().unwrap();
+                               let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+                                       htlc_cltv, HTLCSource::OutboundRoute {
+                                               path: path.clone(),
+                                               session_priv: session_priv.clone(),
+                                               first_hop_htlc_msat: htlc_msat,
+                                               payment_id,
+                                               payment_secret: payment_secret.clone(),
+                                               payment_params: payment_params.clone(),
+                                       }, onion_packet, &self.logger);
+                               match break_chan_entry!(self, send_res, chan) {
+                                       Some(monitor_update) => {
+                                               let update_id = monitor_update.update_id;
+                                               let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
+                                               if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+                                                       break Err(e);
+                                               }
+                                               if update_res == ChannelMonitorUpdateStatus::InProgress {
+                                                       // Note that MonitorUpdateInProgress here indicates (per function
+                                                       // docs) that we will resend the commitment update once monitor
+                                                       // updating completes. Therefore, we must return an error
+                                                       // indicating that it is unsafe to retry the payment wholesale,
+                                                       // which we do in the send_payment check for
+                                                       // MonitorUpdateInProgress, below.
+                                                       return Err(APIError::MonitorUpdateInProgress);
                                                }
-
-                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: path.first().unwrap().pubkey,
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: vec![update_add],
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       },
-                                               });
                                        },
                                        None => { },
                                }
@@ -2667,7 +2699,7 @@ where
                                        (chan, funding_msg)
                                },
                                Err(_) => { return Err(APIError::ChannelUnavailable {
-                                       err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+                                       err: "Signer refused to sign the initial commitment transaction".to_owned()
                                }) },
                        }
                };
@@ -3936,7 +3968,6 @@ where
 
                let per_peer_state = self.per_peer_state.read().unwrap();
                let chan_id = prev_hop.outpoint.to_channel_id();
-
                let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
                        Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
                        None => None
@@ -3948,99 +3979,57 @@ where
                        )
                ).unwrap_or(None);
 
-               if let Some(hash_map::Entry::Occupied(mut chan)) = peer_state_opt.as_mut().map(|peer_state| peer_state.channel_by_id.entry(chan_id))
-               {
-                       let counterparty_node_id = chan.get().get_counterparty_node_id();
-                       match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok(msgs_monitor_option) => {
-                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
-                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
-                                                       ChannelMonitorUpdateStatus::Completed => {},
-                                                       e => {
-                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
-                                                                       "Failed to update channel monitor with preimage {:?}: {:?}",
-                                                                       payment_preimage, e);
-                                                               let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
-                                                               mem::drop(peer_state_opt);
-                                                               mem::drop(per_peer_state);
-                                                               self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
-                                                               return Err((counterparty_node_id, err));
-                                                       }
-                                               }
-                                               if let Some((msg, commitment_signed)) = msgs {
-                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                                       peer_state_opt.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                               node_id: counterparty_node_id,
-                                                               updates: msgs::CommitmentUpdate {
-                                                                       update_add_htlcs: Vec::new(),
-                                                                       update_fulfill_htlcs: vec![msg],
-                                                                       update_fail_htlcs: Vec::new(),
-                                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                                       update_fee: None,
-                                                                       commitment_signed,
-                                                               }
-                                                       });
-                                               }
-                                               mem::drop(peer_state_opt);
-                                               mem::drop(per_peer_state);
-                                               self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
-                                               Ok(())
-                                       } else {
-                                               Ok(())
-                                       }
-                               },
-                               Err((e, monitor_update)) => {
-                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
-                                               ChannelMonitorUpdateStatus::Completed => {},
-                                               e => {
-                                                       // TODO: This needs to be handled somehow - if we receive a monitor update
-                                                       // with a preimage we *must* somehow manage to propagate it to the upstream
-                                                       // channel, or we must have an ability to receive the same update and try
-                                                       // again on restart.
-                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
-                                                               "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                                               payment_preimage, e);
-                                               },
+               if let Some(mut peer_state_lock) = peer_state_opt.take() {
+                       let peer_state = &mut *peer_state_lock;
+                       if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+                               let counterparty_node_id = chan.get().get_counterparty_node_id();
+                               let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+                               if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+                                       if let Some(action) = completion_action(Some(htlc_value_msat)) {
+                                               log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                       log_bytes!(chan_id), action);
+                                               peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                        }
-                                       let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
-                                       if drop {
-                                               chan.remove_entry();
+                                       let update_id = monitor_update.update_id;
+                                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+                                       let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                               peer_state, chan);
+                                       if let Err(e) = res {
+                                               // TODO: This is a *critical* error - we probably updated the outbound edge
+                                               // of the HTLC's monitor with a preimage. We should retry this monitor
+                                               // update over and over again until morale improves.
+                                               log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+                                               return Err((counterparty_node_id, e));
                                        }
-                                       mem::drop(peer_state_opt);
-                                       mem::drop(per_peer_state);
-                                       self.handle_monitor_update_completion_actions(completion_action(None));
-                                       Err((counterparty_node_id, res))
-                               },
-                       }
-               } else {
-                       let preimage_update = ChannelMonitorUpdate {
-                               update_id: CLOSED_CHANNEL_UPDATE_ID,
-                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                       payment_preimage,
-                               }],
-                       };
-                       // We update the ChannelMonitor on the backward link, after
-                       // receiving an `update_fulfill_htlc` from the forward link.
-                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                               // TODO: This needs to be handled somehow - if we receive a monitor update
-                               // with a preimage we *must* somehow manage to propagate it to the upstream
-                               // channel, or we must have an ability to receive the same event and try
-                               // again on restart.
-                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                       payment_preimage, update_res);
+                               }
+                               return Ok(());
                        }
-                       mem::drop(peer_state_opt);
-                       mem::drop(per_peer_state);
-                       // Note that we do process the completion action here. This totally could be a
-                       // duplicate claim, but we have no way of knowing without interrogating the
-                       // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
-                       // generally always allowed to be duplicative (and it's specifically noted in
-                       // `PaymentForwarded`).
-                       self.handle_monitor_update_completion_actions(completion_action(None));
-                       Ok(())
                }
+               let preimage_update = ChannelMonitorUpdate {
+                       update_id: CLOSED_CHANNEL_UPDATE_ID,
+                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                               payment_preimage,
+                       }],
+               };
+               // We update the ChannelMonitor on the backward link, after
+               // receiving an `update_fulfill_htlc` from the forward link.
+               let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+               if update_res != ChannelMonitorUpdateStatus::Completed {
+                       // TODO: This needs to be handled somehow - if we receive a monitor update
+                       // with a preimage we *must* somehow manage to propagate it to the upstream
+                       // channel, or we must have an ability to receive the same event and try
+                       // again on restart.
+                       log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                               payment_preimage, update_res);
+               }
+               // Note that we do process the completion action here. This totally could be a
+               // duplicate claim, but we have no way of knowing without interrogating the
+               // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+               // generally always allowed to be duplicative (and it's specifically noted in
+               // `PaymentForwarded`).
+               self.handle_monitor_update_completion_actions(completion_action(None));
+               Ok(())
        }
 
        fn finalize_claims(&self, sources: Vec<HTLCSource>) {
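
The claim path above intentionally runs the completion action even when the claim may be a duplicate, so events such as `PaymentForwarded` can be delivered more than once. A hypothetical consumer-side sketch of handling that at-least-once delivery; the types and the stable `record_key` are assumptions of this example, not LDK APIs:

        use std::collections::HashSet;

        // Hypothetical forwarding-fee ledger that tolerates duplicate event delivery.
        struct ForwardLedger {
                seen: HashSet<u64>, // stable keys of records already applied
                total_fees_msat: u64,
        }

        impl ForwardLedger {
                // `record_key` is whatever stable identifier the application derives for a
                // forward; repeated deliveries with the same key are ignored.
                fn apply(&mut self, record_key: u64, fee_msat: u64) {
                        if self.seen.insert(record_key) {
                                self.total_fees_msat += fee_msat;
                        }
                }
        }
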
@@ -4111,6 +4100,14 @@ where
                pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+               log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+                       log_bytes!(channel.channel_id()),
+                       if raa.is_some() { "an" } else { "no" },
+                       if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+                       if funding_broadcastable.is_some() { "" } else { "not " },
+                       if channel_ready.is_some() { "sending" } else { "without" },
+                       if announcement_sigs.is_some() { "sending" } else { "without" });
+
                let mut htlc_forwards = None;
 
                let counterparty_node_id = channel.get_counterparty_node_id();
@@ -4169,65 +4166,36 @@ where
        fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let htlc_forwards;
-               let (mut pending_failures, finalized_claims, counterparty_node_id) = {
-                       let counterparty_node_id = match counterparty_node_id {
-                               Some(cp_id) => cp_id.clone(),
-                               None => {
-                                       // TODO: Once we can rely on the counterparty_node_id from the
-                                       // monitor event, this and the id_to_peer map should be removed.
-                                       let id_to_peer = self.id_to_peer.lock().unwrap();
-                                       match id_to_peer.get(&funding_txo.to_channel_id()) {
-                                               Some(cp_id) => cp_id.clone(),
-                                               None => return,
-                                       }
-                               }
-                       };
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       let mut peer_state_lock;
-                       let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-                       if peer_state_mutex_opt.is_none() { return }
-                       peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       let mut channel = {
-                               match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
-                                       hash_map::Entry::Occupied(chan) => chan,
-                                       hash_map::Entry::Vacant(_) => return,
+               let counterparty_node_id = match counterparty_node_id {
+                       Some(cp_id) => cp_id.clone(),
+                       None => {
+                               // TODO: Once we can rely on the counterparty_node_id from the
+                               // monitor event, this and the id_to_peer map should be removed.
+                               let id_to_peer = self.id_to_peer.lock().unwrap();
+                               match id_to_peer.get(&funding_txo.to_channel_id()) {
+                                       Some(cp_id) => cp_id.clone(),
+                                       None => return,
                                }
-                       };
-                       if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
-                               return;
                        }
-
-                       let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height());
-                       let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
-                               // We only send a channel_update in the case where we are just now sending a
-                               // channel_ready and the channel is in a usable state. We may re-send a
-                               // channel_update later through the announcement_signatures process for public
-                               // channels, but there's no reason not to just inform our counterparty of our fees
-                               // now.
-                               if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
-                                       Some(events::MessageSendEvent::SendChannelUpdate {
-                                               node_id: channel.get().get_counterparty_node_id(),
-                                               msg,
-                                       })
-                               } else { None }
-                       } else { None };
-                       htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
-                       if let Some(upd) = channel_update {
-                               peer_state.pending_msg_events.push(upd);
+               };
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let mut peer_state_lock;
+               let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+               if peer_state_mutex_opt.is_none() { return }
+               peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               let mut channel = {
+                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
+                               hash_map::Entry::Occupied(chan) => chan,
+                               hash_map::Entry::Vacant(_) => return,
                        }
-
-                       (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
-               if let Some(forwards) = htlc_forwards {
-                       self.forward_htlcs(&mut [forwards][..]);
-               }
-               self.finalize_claims(finalized_claims);
-               for failure in pending_failures.drain(..) {
-                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
-                       self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+               log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
+                       highest_applied_update_id, channel.get().get_latest_monitor_update_id());
+               if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+                       return;
                }
+               handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
        }
 
        /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -4275,11 +4243,13 @@ where
        fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
+               let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+               let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
                match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
                        hash_map::Entry::Occupied(mut channel) => {
                                if !channel.get().inbound_is_awaiting_accept() {
@@ -4297,6 +4267,21 @@ where
                                        peer_state.pending_msg_events.push(send_msg_err_event);
                                        let _ = remove_channel!(self, channel);
                                        return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                               } else {
+                                       // If this peer already has some channels, a new channel won't increase our number of peers
+                                       // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+                                       // channels per-peer we can accept channels from a peer with existing ones.
+                                       if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+                                               let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                                       node_id: channel.get().get_counterparty_node_id(),
+                                                       action: msgs::ErrorAction::SendErrorMessage{
+                                                               msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+                                                       }
+                                               };
+                                               peer_state.pending_msg_events.push(send_msg_err_event);
+                                               let _ = remove_channel!(self, channel);
+                                               return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+                                       }
                                }
 
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
@@ -4311,6 +4296,43 @@ where
                Ok(())
        }
 
+       /// Gets the number of peers which match the given filter and do not have any funded, outbound,
+       /// or 0-conf channels.
+       ///
+       /// The filter is called with each peer's state; only peers for which it returns true are
+       /// considered, and a peer is only counted if every channel it has with us is still unfunded.
+       fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
+       where Filter: Fn(&PeerState<<SP::Target as SignerProvider>::Signer>) -> bool {
+               let mut peers_without_funded_channels = 0;
+               let best_block_height = self.best_block.read().unwrap().height();
+               {
+                       let peer_state_lock = self.per_peer_state.read().unwrap();
+                       for (_, peer_mtx) in peer_state_lock.iter() {
+                               let peer = peer_mtx.lock().unwrap();
+                               if !maybe_count_peer(&*peer) { continue; }
+                               let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
+                               if num_unfunded_channels == peer.channel_by_id.len() {
+                                       peers_without_funded_channels += 1;
+                               }
+                       }
+               }
+               return peers_without_funded_channels;
+       }
+
+       fn unfunded_channel_count(
+               peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
+       ) -> usize {
+               let mut num_unfunded_channels = 0;
+               for (_, chan) in peer.channel_by_id.iter() {
+                       if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
+                               chan.get_funding_tx_confirmations(best_block_height) == 0
+                       {
+                               num_unfunded_channels += 1;
+                       }
+               }
+               num_unfunded_channels
+       }
+
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                if msg.chain_hash != self.genesis_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
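
For intuition, the two helpers introduced above encode the rule that a peer counts toward the unfunded-peer limit only when every channel it has with us is inbound, not 0-conf, and has an unconfirmed funding transaction. A simplified standalone sketch of that rule; the `Chan` struct is an illustration, not LDK's `Channel`:

        // Simplified stand-in for a channel, for illustration only.
        struct Chan { is_outbound: bool, minimum_depth: Option<u32>, funding_confs: u32 }

        // Mirrors unfunded_channel_count(): inbound, not 0-conf, funding not yet confirmed.
        fn is_unfunded_inbound(chan: &Chan) -> bool {
                !chan.is_outbound && chan.minimum_depth.unwrap_or(1) != 0 && chan.funding_confs == 0
        }

        // With the `!channel_by_id.is_empty()` filter used at both call sites, a peer is
        // "without funded channels" only when all of its channels are unfunded and inbound.
        fn peer_counts_toward_limit(chans: &[Chan]) -> bool {
                !chans.is_empty() && chans.iter().all(is_unfunded_inbound)
        }
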
@@ -4323,8 +4345,13 @@ where
                let mut random_bytes = [0u8; 16];
                random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
                let user_channel_id = u128::from_be_bytes(random_bytes);
-
                let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+
+               // Get the number of peers with channels, but without funded ones. We don't care too much
+               // about peers that never open a channel, so we filter by peers that have at least one
+               // channel, and then limit the number of those with unfunded channels.
+               let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
+
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                    .ok_or_else(|| {
@@ -4333,9 +4360,29 @@ where
                        })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+
+               // If this peer already has some channels, a new channel won't increase our number of peers
+               // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+               // channels per-peer we can accept channels from a peer with existing ones.
+               if peer_state.channel_by_id.is_empty() &&
+                       channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
+                       !self.default_configuration.manually_accept_inbound_channels
+               {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
+                               msg.temporary_channel_id.clone()));
+               }
+
+               let best_block_height = self.best_block.read().unwrap().height();
+               if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
+                               msg.temporary_channel_id.clone()));
+               }
+
                let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
-                       counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration,
-                       self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
+                       counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
+                       &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
                {
                        Err(e) => {
                                self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
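
Taken together, `internal_open_channel` now applies two caps before building the channel: a node-wide cap on peers whose channels are all unfunded (waived when the peer already has a channel with us or when channels are manually accepted) and a per-peer cap on unfunded channels. A condensed restatement of the gate, with `reject(..)` standing in for the `MsgHandleErrInternal::send_err_msg_no_close` error paths:

        // The peer-level cap only matters when this would be the peer's first channel with us.
        if peer_state.channel_by_id.is_empty() &&
                channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
                !self.default_configuration.manually_accept_inbound_channels
        {
                return reject("Have too many peers with unfunded channels, not accepting new ones");
        }
        // Regardless of the peer-level cap, bound the unfunded channels any single peer may open.
        if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
                return reject("Refusing more than MAX_UNFUNDED_CHANS_PER_PEER unfunded channels.");
        }
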
@@ -4406,60 +4453,31 @@ where
        }
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+               let best_block = *self.best_block.read().unwrap();
+
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
                                debug_assert!(false);
                                MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
                        })?;
-               let ((funding_msg, monitor, mut channel_ready), mut chan) = {
-                       let best_block = *self.best_block.read().unwrap();
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
+
+               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               let ((funding_msg, monitor), chan) =
                        match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
-                       }
-               };
-               // Because we have exclusive ownership of the channel here we can release the peer_state
-               // lock before watch_channel
-               match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
-                       ChannelMonitorUpdateStatus::Completed => {},
-                       ChannelMonitorUpdateStatus::PermanentFailure => {
-                               // Note that we reply with the new channel_id in error messages if we gave up on the
-                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                               // any messages referencing a previously-closed channel anyway.
-                               // We do not propagate the monitor update to the user as it would be for a monitor
-                               // that we didn't manage to store (and that we don't care about - we don't respond
-                               // with the funding_signed so the channel can never go on chain).
-                               let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
-                               assert!(failed_htlcs.is_empty());
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
-                       },
-                       ChannelMonitorUpdateStatus::InProgress => {
-                               // There's no problem signing a counterparty's funding transaction if our monitor
-                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
-                               // until we have persisted our monitor.
-                               chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
-                               channel_ready = None; // Don't send the channel_ready now
-                       },
-               }
-               // It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
-               // peer exists, despite the inner PeerState potentially having no channels after removing
-               // the channel above.
-               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-               let peer_state = &mut *peer_state_lock;
+                       };
+
                match peer_state.channel_by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
+                               Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               match id_to_peer.entry(chan.channel_id()) {
+                               match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
@@ -4469,63 +4487,66 @@ where
                                                i_e.insert(chan.get_counterparty_node_id());
                                        }
                                }
+
+                               // There's no problem signing a counterparty's funding transaction if our monitor
+                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
+                               // until we have persisted our monitor.
+                               let new_channel_id = funding_msg.channel_id;
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
-                               if let Some(msg) = channel_ready {
-                                       send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
+
+                               let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+
+                               let chan = e.insert(chan);
+                               let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+
+                               // Note that we reply with the new channel_id in error messages if we gave up on the
+                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                               // any messages referencing a previously-closed channel anyway.
+                               // We do not propagate the monitor update to the user as it would be for a monitor
+                               // that we didn't manage to store (and that we don't care about - we don't respond
+                               // with the funding_signed so the channel can never go on chain).
+                               if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+                                       res.0 = None;
                                }
-                               e.insert(chan);
+                               res
                        }
                }
-               Ok(())
        }
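
As an aside on the comment re-added above (we can safely sign a counterparty's funding transaction before our monitor persists, but must hold `channel_ready` until it has), here is a minimal standalone sketch of that ordering; the types and names are made up for illustration and are not LDK's actual API:

    enum PersistStatus { Completed, InProgress }

    #[derive(Default)]
    struct PendingMsgs {
        funding_signed: Option<String>,
        channel_ready: Option<String>,
    }

    /// Always queue `funding_signed`, but only release `channel_ready` once the
    /// monitor has actually been persisted; otherwise stash it for later.
    fn on_funding_created(persist: PersistStatus, msgs: &mut PendingMsgs, stashed: &mut Option<String>) {
        msgs.funding_signed = Some("funding_signed".to_owned());
        match persist {
            PersistStatus::Completed => msgs.channel_ready = Some("channel_ready".to_owned()),
            PersistStatus::InProgress => *stashed = Some("channel_ready".to_owned()),
        }
    }

    fn main() {
        let mut msgs = PendingMsgs::default();
        let mut stashed = None;
        on_funding_created(PersistStatus::InProgress, &mut msgs, &mut stashed);
        assert!(msgs.funding_signed.is_some());
        assert!(msgs.channel_ready.is_none() && stashed.is_some());
    }

In the rewritten handler there is no explicit stash slot; the same effect falls out of pausing the channel on an in-progress monitor update and resuming it on completion.
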
 
        fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-               let funding_tx = {
-                       let best_block = *self.best_block.read().unwrap();
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                               .ok_or_else(|| {
-                                       debug_assert!(false);
-                                       MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-                               })?;
+               let best_block = *self.best_block.read().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+                       .ok_or_else(|| {
+                               debug_assert!(false);
+                               MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+                       })?;
 
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.channel_id) {
-                               hash_map::Entry::Occupied(mut chan) => {
-                                       let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) {
-                                               Ok(update) => update,
-                                               Err(e) => try_chan_entry!(self, Err(e), chan),
-                                       };
-                                       match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               ChannelMonitorUpdateStatus::Completed => {},
-                                               e => {
-                                                       let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-                                                       if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                               // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                               // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                               // monitor update contained within `shutdown_finish` was applied.
-                                                               if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                                       shutdown_finish.0.take();
-                                                               }
-                                                       }
-                                                       return res
-                                               },
-                                       }
-                                       if let Some(msg) = channel_ready {
-                                               send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.entry(msg.channel_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               let monitor = try_chan_entry!(self,
+                                       chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
+                               let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+                               let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                       // We weren't able to watch the channel to begin with, so no updates should be made on
+                                       // it. Previously, full_stack_target found an (unreachable) panic when the
+                                       // monitor update contained within `shutdown_finish` was applied.
+                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                               shutdown_finish.0.take();
                                        }
-                                       funding_tx
-                               },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
-                       }
-               };
-               log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
-               self.tx_broadcaster.broadcast_transaction(&funding_tx);
-               Ok(())
+                               }
+                               res
+                       },
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+               }
        }
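
Both funding handlers above now funnel the `watch_channel` result through the new `handle_new_monitor_update!` macro. A rough standalone sketch of the decision it encodes follows; the enum mirrors `ChannelMonitorUpdateStatus`, but everything here is illustrative, and the real macro additionally pauses the channel and resumes it once persistence completes:

    /// Stand-in for LDK's `ChannelMonitorUpdateStatus` in this sketch.
    enum UpdateStatus { Completed, InProgress, PermanentFailure }

    enum Action {
        /// Keep going; any queued replies can be sent now.
        Proceed,
        /// Persistence is async; hold replies until the update completes.
        PauseChannel,
        /// The monitor could not be stored; force-close the channel.
        CloseChannel,
    }

    fn handle_new_monitor_update(status: UpdateStatus) -> Action {
        match status {
            UpdateStatus::Completed => Action::Proceed,
            UpdateStatus::InProgress => Action::PauseChannel,
            UpdateStatus::PermanentFailure => Action::CloseChannel,
        }
    }

    fn main() {
        assert!(matches!(handle_new_monitor_update(UpdateStatus::InProgress), Action::PauseChannel));
    }
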
 
        fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
@@ -4590,27 +4611,27 @@ where
                                                        if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
                                        }
 
-                                       let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+                                               chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
                                        dropped_htlcs = htlcs;
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update {
-                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
-                                               let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                               if is_permanent {
-                                                       remove_channel!(self, chan_entry);
-                                                       break result;
-                                               }
-                                       }
-
                                        if let Some(msg) = shutdown {
+                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                               // here as we don't need the monitor update to complete until we send a
+                                               // `closing_signed`, which we'll delay if we're pending a monitor update.
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: *counterparty_node_id,
                                                        msg,
                                                });
                                        }
 
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update_opt {
+                                               let update_id = monitor_update.update_id;
+                                               let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+                                       }
                                        break Ok(());
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -4622,8 +4643,7 @@ where
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
 
-               let _ = handle_error!(self, result, *counterparty_node_id);
-               Ok(())
+               result
        }
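
The reordering above, queueing the `shutdown` reply before applying the monitor update, is safe because, per the new comment, nothing we send before `closing_signed` depends on that update completing. A small sketch of the ordering with made-up types:

    enum Persist { Completed, InProgress }

    /// Queue our `shutdown` immediately; only the eventual `closing_signed` has
    /// to wait for the monitor update, so a pending update merely defers that.
    fn handle_shutdown(update_needed: bool, persist: Persist, out: &mut Vec<&'static str>) {
        out.push("shutdown");
        if update_needed {
            match persist {
                Persist::Completed => out.push("closing_signed (once fee negotiation starts)"),
                Persist::InProgress => { /* closing_signed deferred until the update completes */ }
            }
        }
    }

    fn main() {
        let mut out = Vec::new();
        handle_shutdown(true, Persist::InProgress, &mut out);
        assert_eq!(out, vec!["shutdown"]);
    }
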
 
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
@@ -4797,40 +4817,12 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               let (revoke_and_ack, commitment_signed, monitor_update) =
-                                       match chan.get_mut().commitment_signed(&msg, &self.logger) {
-                                               Err((None, e)) => try_chan_entry!(self, Err(e), chan),
-                                               Err((Some(update), e)) => {
-                                                       assert!(chan.get().is_awaiting_monitor_update());
-                                                       let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update);
-                                                       try_chan_entry!(self, Err(e), chan);
-                                                       unreachable!();
-                                               },
-                                               Ok(res) => res
-                                       };
-                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
-                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
-                                       return Err(e);
-                               }
-
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-                                       node_id: counterparty_node_id.clone(),
-                                       msg: revoke_and_ack,
-                               });
-                               if let Some(msg) = commitment_signed {
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: counterparty_node_id.clone(),
-                                               updates: msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs: Vec::new(),
-                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                       update_fee: None,
-                                                       commitment_signed: msg,
-                                               },
-                                       });
-                               }
-                               Ok(())
+                               let funding_txo = chan.get().get_funding_txo();
+                               let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+                               let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+                               let update_id = monitor_update.update_id;
+                               handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                       peer_state, chan)
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -4934,8 +4926,7 @@ where
        }
 
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
-               let mut htlcs_to_fail = Vec::new();
-               let res = loop {
+               let (htlcs_to_fail, res) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
@@ -4946,59 +4937,19 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
-                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
-                                       let raa_updates = break_chan_entry!(self,
-                                               chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
-                                       htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update);
-                                       if was_paused_for_mon_update {
-                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
-                                               assert!(raa_updates.commitment_update.is_none());
-                                               assert!(raa_updates.accepted_htlcs.is_empty());
-                                               assert!(raa_updates.failed_htlcs.is_empty());
-                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
-                                       }
-                                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
-                                                               RAACommitmentOrder::CommitmentFirst, false,
-                                                               raa_updates.commitment_update.is_some(), false,
-                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                               raa_updates.finalized_claimed_htlcs) {
-                                                       break Err(e);
-                                               } else { unreachable!(); }
-                                       }
-                                       if let Some(updates) = raa_updates.commitment_update {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       updates,
-                                               });
-                                       }
-                                       break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                       raa_updates.finalized_claimed_htlcs,
-                                                       chan.get().get_short_channel_id()
-                                                               .unwrap_or(chan.get().outbound_scid_alias()),
-                                                       chan.get().get_funding_txo().unwrap(),
-                                                       chan.get().get_user_id()))
+                                       let funding_txo = chan.get().get_funding_txo();
+                                       let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+                                       let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+                                       let update_id = monitor_update.update_id;
+                                       let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                               peer_state, chan);
+                                       (htlcs_to_fail, res)
                                },
-                               hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
-               match res {
-                       Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
-                               short_channel_id, channel_outpoint, user_channel_id)) =>
-                       {
-                               for failure in pending_failures.drain(..) {
-                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
-                                       self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
-                               }
-                               self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
-                               self.finalize_claims(finalized_claim_htlcs);
-                               Ok(())
-                       },
-                       Err(e) => Err(e)
-               }
+               res
        }
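
Note how `internal_revoke_and_ack` now returns the holding-cell HTLCs to fail out of the scoped block, so the per-peer lock is released before `fail_holding_cell_htlcs` runs. A generic sketch of that collect-under-the-lock, act-after-dropping-it pattern (illustrative types only):

    use std::sync::Mutex;

    struct PeerState { holding_cell_failures: Vec<u64> }

    fn process_raa(peer: &Mutex<PeerState>) {
        // Collect the work while holding the lock...
        let to_fail = {
            let mut state = peer.lock().unwrap();
            std::mem::take(&mut state.holding_cell_failures)
        }; // ...the guard (and thus the lock) is dropped here...
        // ...then do the potentially lock-taking follow-up work unlocked.
        for htlc_id in to_fail {
            println!("failing back HTLC {}", htlc_id);
        }
    }

    fn main() {
        let peer = Mutex::new(PeerState { holding_cell_failures: vec![1, 2] });
        process_raa(&peer);
    }
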
 
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
@@ -5240,49 +5191,37 @@ where
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
                let mut handle_errors = Vec::new();
-               {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                       'chan_loop: loop {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|channel_id, chan| {
-                                       match chan.maybe_free_holding_cell_htlcs(&self.logger) {
-                                               Ok((commitment_opt, holding_cell_failed_htlcs)) => {
-                                                       if !holding_cell_failed_htlcs.is_empty() {
-                                                               failed_htlcs.push((
-                                                                       holding_cell_failed_htlcs,
-                                                                       *channel_id,
-                                                                       chan.get_counterparty_node_id()
-                                                               ));
-                                                       }
-                                                       if let Some((commitment_update, monitor_update)) = commitment_opt {
-                                                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) {
-                                                                       ChannelMonitorUpdateStatus::Completed => {
-                                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                                       node_id: chan.get_counterparty_node_id(),
-                                                                                       updates: commitment_update,
-                                                                               });
-                                                                       },
-                                                                       e => {
-                                                                               has_monitor_update = true;
-                                                                               let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
-                                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
-                                                                               if close_channel { return false; }
-                                                                       },
-                                                               }
-                                                       }
-                                                       true
-                                               },
-                                               Err(e) => {
-                                                       let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                                       handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
-                                                       // ChannelClosed event is generated by handle_error for us
-                                                       !close_channel
+                               let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+                               for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                       let counterparty_node_id = chan.get_counterparty_node_id();
+                                       let funding_txo = chan.get_funding_txo();
+                                       let (monitor_opt, holding_cell_failed_htlcs) =
+                                               chan.maybe_free_holding_cell_htlcs(&self.logger);
+                                       if !holding_cell_failed_htlcs.is_empty() {
+                                               failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+                                       }
+                                       if let Some(monitor_update) = monitor_opt {
+                                               has_monitor_update = true;
+
+                                               let update_res = self.chain_monitor.update_channel(
+                                                       funding_txo.expect("channel is live"), monitor_update);
+                                               let update_id = monitor_update.update_id;
+                                               let channel_id: [u8; 32] = *channel_id;
+                                               let res = handle_new_monitor_update!(self, update_res, update_id,
+                                                       peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
+                                                       peer_state.channel_by_id.remove(&channel_id));
+                                               if res.is_err() {
+                                                       handle_errors.push((counterparty_node_id, res));
                                                }
+                                               continue 'chan_loop;
                                        }
-                               });
+                               }
+                               break 'chan_loop;
                        }
                }
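
The labelled `'chan_loop` above replaces the old `retain` because handling a monitor update may now remove the channel from `channel_by_id`, which cannot happen safely while the map is being iterated; instead the scan restarts after every update. A standalone sketch of that restart-on-mutation pattern:

    use std::collections::HashMap;

    fn drain_ready(channels: &mut HashMap<u32, bool /* has pending work */>) {
        'chan_loop: loop {
            let mut to_remove = None;
            for (&id, &pending) in channels.iter() {
                if pending {
                    to_remove = Some(id);
                    break;
                }
            }
            match to_remove {
                // Mutate only after the iterator borrow has ended, then rescan the map.
                Some(id) => { channels.remove(&id); continue 'chan_loop; }
                None => break 'chan_loop,
            }
        }
    }

    fn main() {
        let mut chans = HashMap::from([(1u32, true), (2, false), (3, true)]);
        drain_ready(&mut chans);
        assert_eq!(chans.len(), 1);
    }
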
 
@@ -6246,13 +6185,13 @@ where
                let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
        }
 
-       fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
+       fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
-                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
-                               log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
+                               log_pubkey!(counterparty_node_id));
                        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
@@ -6294,7 +6233,7 @@ where
                                debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
                                peer_state.is_connected = false;
                                peer_state.ok_to_remove(true)
-                       } else { true }
+                       } else { debug_assert!(false, "Unconnected peer disconnected"); true }
                };
                if remove_peer {
                        per_peer_state.remove(counterparty_node_id);
@@ -6306,38 +6245,57 @@ where
                }
        }
 
-       fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
                if !init_msg.features.supports_static_remote_key() {
-                       log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+                       log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
                        return Err(());
                }
 
-               log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
-
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
+               // If we have too many peers connected which don't have funded channels, disconnect the
+               // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+               // unfunded channels taking up space in memory for disconnected peers, we still let new
+               // peers connect, but we'll reject new channels from them.
+               let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+               let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+
                {
                        let mut peer_state_lock = self.per_peer_state.write().unwrap();
                        match peer_state_lock.entry(counterparty_node_id.clone()) {
                                hash_map::Entry::Vacant(e) => {
+                                       if inbound_peer_limited {
+                                               return Err(());
+                                       }
                                        e.insert(Mutex::new(PeerState {
                                                channel_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
                                                pending_msg_events: Vec::new(),
+                                               monitor_update_blocked_actions: BTreeMap::new(),
                                                is_connected: true,
                                        }));
                                },
                                hash_map::Entry::Occupied(e) => {
                                        let mut peer_state = e.get().lock().unwrap();
                                        peer_state.latest_features = init_msg.features.clone();
+
+                                       let best_block_height = self.best_block.read().unwrap().height();
+                                       if inbound_peer_limited &&
+                                               Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+                                               peer_state.channel_by_id.len()
+                                       {
+                                               return Err(());
+                                       }
+
                                        debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
                                        peer_state.is_connected = true;
                                },
                        }
                }
 
-               let per_peer_state = self.per_peer_state.read().unwrap();
+               log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
+               let per_peer_state = self.per_peer_state.read().unwrap();
                for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
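
The gating above counts connected peers that have no funded channels and, once that count reaches `MAX_NO_CHANNEL_PEERS`, refuses new inbound connections (and inbound reconnections whose channels are all unfunded). A simplified standalone sketch of the policy; the constant's value and the types are stand-ins, not LDK's:

    const MAX_NO_CHANNEL_PEERS: usize = 250;

    struct Peer { connected: bool, funded_channels: usize }

    fn accept_connection(peers: &[Peer], inbound: bool) -> bool {
        // Outbound connections are always allowed; we initiated them ourselves.
        if !inbound { return true; }
        let unfunded_connected = peers.iter()
            .filter(|p| p.connected && p.funded_channels == 0)
            .count();
        unfunded_connected < MAX_NO_CHANNEL_PEERS
    }

    fn main() {
        let peers: Vec<Peer> = (0..MAX_NO_CHANNEL_PEERS)
            .map(|_| Peer { connected: true, funded_channels: 0 })
            .collect();
        assert!(!accept_connection(&peers, true));
        assert!(accept_connection(&peers, false));
    }
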
@@ -6951,10 +6909,14 @@ where
                        htlc_purposes.push(purpose);
                }
 
+               let mut monitor_update_blocked_actions_per_peer = None;
+               let mut peer_states = Vec::new();
+               for (_, peer_state_mutex) in per_peer_state.iter() {
+                       peer_states.push(peer_state_mutex.lock().unwrap());
+               }
+
                (serializable_peer_count).write(writer)?;
-               for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
-                       let peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &*peer_state_lock;
+               for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
                        // Peers which we have no channels to should be dropped once disconnected. As we
                        // disconnect all peers when shutting down and serializing the ChannelManager, we
                        // consider all peers as disconnected here. There's therefore no need to write peers with
@@ -6962,6 +6924,11 @@ where
                        if !peer_state.ok_to_remove(false) {
                                peer_pubkey.write(writer)?;
                                peer_state.latest_features.write(writer)?;
+                               if !peer_state.monitor_update_blocked_actions.is_empty() {
+                                       monitor_update_blocked_actions_per_peer
+                                               .get_or_insert_with(Vec::new)
+                                               .push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
+                               }
                        }
                }
 
@@ -7039,8 +7006,6 @@ where
                        // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
                        // map. Thus, if there are no entries we skip writing a TLV for it.
                        pending_claiming_payments = None;
-               } else {
-                       debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
                }
 
                write_tlv_fields!(writer, {
@@ -7049,6 +7014,7 @@ where
                        (3, pending_outbound_payments, required),
                        (4, pending_claiming_payments, option),
                        (5, self.our_network_pubkey, required),
+                       (6, monitor_update_blocked_actions_per_peer, option),
                        (7, self.fake_scid_rand_bytes, required),
                        (9, htlc_purposes, vec_type),
                        (11, self.probing_cookie_secret, required),
@@ -7361,6 +7327,7 @@ where
                                channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
                                latest_features: Readable::read(reader)?,
                                pending_msg_events: Vec::new(),
+                               monitor_update_blocked_actions: BTreeMap::new(),
                                is_connected: false,
                        };
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -7417,12 +7384,14 @@ where
                let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
                let mut pending_claiming_payments = Some(HashMap::new());
+               let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (2, pending_intercepted_htlcs, option),
                        (3, pending_outbound_payments, option),
                        (4, pending_claiming_payments, option),
                        (5, received_network_pubkey, option),
+                       (6, monitor_update_blocked_actions_per_peer, option),
                        (7, fake_scid_rand_bytes, option),
                        (9, claimable_htlc_purposes, vec_type),
                        (11, probing_cookie_secret, option),
@@ -7689,6 +7658,15 @@ where
                        }
                }
 
+               for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
+                       if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+                               peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
+                       } else {
+                               log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
+                               return Err(DecodeError::InvalidValue);
+                       }
+               }
+
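
On the write side the blocked actions are collected only for peers that actually have some, and on the read side an entry for a peer missing from `per_peer_state` is rejected as corrupt. A compact sketch of that validate-on-read shape, with plain types and none of the TLV machinery:

    use std::collections::HashMap;

    #[derive(Debug)]
    struct InvalidValue;

    fn apply_blocked_actions(
        per_peer: &mut HashMap<&'static str, Vec<&'static str>>,
        read: Vec<(&'static str, Vec<&'static str>)>,
    ) -> Result<(), InvalidValue> {
        for (peer, actions) in read {
            match per_peer.get_mut(peer) {
                Some(slot) => *slot = actions,
                // Blocked actions for a peer we have no state for means the data is corrupt.
                None => return Err(InvalidValue),
            }
        }
        Ok(())
    }

    fn main() {
        let mut per_peer = HashMap::from([("alice", Vec::new())]);
        assert!(apply_blocked_actions(&mut per_peer, vec![("alice", vec!["raa"])]).is_ok());
        assert!(apply_blocked_actions(&mut per_peer, vec![("mallory", vec![])]).is_err());
    }
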
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: bounded_fee_estimator,
@@ -8187,8 +8165,8 @@ mod tests {
 
                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
                nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
                check_closed_broadcast!(nodes[0], true);
@@ -8408,6 +8386,213 @@ mod tests {
                check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
        }
 
+       #[test]
+       fn test_connection_limiting() {
+               // Test that we limit un-channel'd peers and un-funded channels properly.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Note that create_network connects the nodes together for us
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+               let mut funding_tx = None;
+               for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+                       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+                       let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+                       if idx == 0 {
+                               nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+                               let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+                               funding_tx = Some(tx.clone());
+                               nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
+                               let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+                               nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+                               check_added_monitors!(nodes[1], 1);
+                               let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+                               nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+                               check_added_monitors!(nodes[0], 1);
+                       }
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+
+               // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
+               open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // Further, because all of our channels with nodes[0] are inbound, and none of them are
+               // funded, it doesn't count as a "protected" peer, i.e. it counts towards the
+               // MAX_NO_CHANNEL_PEERS limit.
+               let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
+               for _ in 1..super::MAX_NO_CHANNEL_PEERS {
+                       let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                               &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+                       peer_pks.push(random_pk);
+                       nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               }
+               let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                       &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+               nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+               // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
+               // them if we have too many un-channel'd peers.
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
+               for ev in chan_closed_events {
+                       if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
+               }
+               nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+
+               // but of course if the connection is outbound it's allowed...
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+               // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
+               // Even though we accept one more connection from new peers, we won't actually let them
+               // open channels.
+               assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
+               for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+                       nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
+                       get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // Of course, however, outbound channels are always allowed
+               nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
+               get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
+
+               // If we fund the first channel, nodes[0] has a live on-chain channel with us, so it is
+               // now "protected" and can connect again.
+               mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+               // Further, because the first channel was funded, we can open another channel with
+               // last_random_pk.
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+       }
+
+       #[test]
+       fn test_outbound_chans_unlimited() {
+               // Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Note that create_network connects the nodes together for us
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+               for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
+                       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+                       get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+
+               // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
+               // rejected.
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // but we can still open an outbound channel.
+               nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+
+               // but even with such an outbound channel, additional inbound channels will still fail.
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+               assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+                       open_channel_msg.temporary_channel_id);
+       }
+
+       #[test]
+       fn test_0conf_limiting() {
+               // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
+               // flag set and (sometimes) accept channels as 0conf.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let mut settings = test_default_channel_config();
+               settings.manually_accept_inbound_channels = true;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               // Note that create_network connects the nodes together for us
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+               let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+               // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
+               for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
+                       let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                               &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+                       nodes[1].node.peer_connected(&random_pk, &msgs::Init {
+                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+
+                       nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
+                       let events = nodes[1].node.get_and_clear_pending_events();
+                       match events[0] {
+                               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                                       nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
+                               }
+                               _ => panic!("Unexpected event"),
+                       }
+                       get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
+                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               }
+
+               // If we try to accept a channel from another peer non-0conf it will fail.
+               let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
+                       &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
+               nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
+                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                               match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
+                                       Err(APIError::APIMisuseError { err }) =>
+                                               assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
+                                       _ => panic!(),
+                               }
+                       }
+                       _ => panic!("Unexpected event"),
+               }
+               assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+                       open_channel_msg.temporary_channel_id);
+
+               // ...however if we accept the same channel 0conf it should work just fine.
+               nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               match events[0] {
+                       Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                               nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
+                       }
+                       _ => panic!("Unexpected event"),
+               }
+               get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
+       }
+
        #[cfg(anchors)]
        #[test]
        fn test_anchors_zero_fee_htlc_tx_fallback() {
@@ -8518,8 +8703,8 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap();
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap();
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
index 0c21588fb717af4aee2ef93a52f9da16abaaaaa3..fd867bc6ab7e07287c0572ae1cdb577e9e290ab8 100644 (file)
@@ -2374,8 +2374,8 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
 
        for i in 0..node_count {
                for j in (i+1)..node_count {
-                       nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()), remote_network_address: None }).unwrap();
-                       nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()), remote_network_address: None }).unwrap();
+                       nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()), remote_network_address: None }, true).unwrap();
+                       nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()), remote_network_address: None }, false).unwrap();
                }
        }
 
@@ -2658,9 +2658,9 @@ macro_rules! handle_chan_reestablish_msgs {
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
 pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
-       node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }).unwrap();
+       node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: node_b.node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
-       node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }).unwrap();
+       node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: node_a.node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
 
        if send_channel_ready.0 {
index 3da49ca7dc8d6db2a2a3942c42c1a43e2355351b..d6d2972d0736e3ec642fccee7363e44329560584 100644 (file)
@@ -3524,8 +3524,8 @@ fn test_dup_events_on_peer_disconnect() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
        expect_payment_sent_without_paths!(nodes[0], payment_preimage);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
        expect_payment_path_successful!(nodes[0]);
@@ -3565,8 +3565,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
 
        // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
        // disconnected before the funding transaction was broadcasted.
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
        check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
@@ -3582,8 +3582,8 @@ fn test_simple_peer_disconnect() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
        create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3591,8 +3591,8 @@ fn test_simple_peer_disconnect() {
        fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
@@ -3600,8 +3600,8 @@ fn test_simple_peer_disconnect() {
        let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
        let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
        fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
@@ -3618,22 +3618,22 @@ fn test_simple_peer_disconnect() {
                        _ => panic!("Unexpected event"),
                }
                match events[1] {
+                       Event::PaymentPathSuccessful { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+               match events[2] {
                        Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                                assert!(payment_failed_permanently);
                        },
                        _ => panic!("Unexpected event"),
                }
-               match events[2] {
+               match events[3] {
                        Event::PaymentFailed { payment_hash, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                        },
                        _ => panic!("Unexpected event"),
                }
-               match events[3] {
-                       Event::PaymentPathSuccessful { .. } => {},
-                       _ => panic!("Unexpected event"),
-               }
        }
 
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
@@ -3701,8 +3701,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                }
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        if messages_delivered < 3 {
                if simulate_broken_lnd {
                        // lnd has a long-standing bug where they send a channel_ready prior to a
@@ -3751,8 +3751,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                };
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        nodes[1].node.process_pending_htlc_forwards();
@@ -3834,8 +3834,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                }
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        if messages_delivered < 2 {
                reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
                if messages_delivered < 1 {
@@ -3861,8 +3861,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                expect_payment_path_successful!(nodes[0]);
        }
        if messages_delivered <= 5 {
-               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        }
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -3976,13 +3976,13 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
                _ => panic!("Unexpected event"),
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
 
@@ -6292,12 +6292,12 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
 
        //Disconnect and Reconnect
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
@@ -7032,8 +7032,8 @@ fn test_announce_disable_channels() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
        // Disconnect peers
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
        nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
@@ -7053,10 +7053,10 @@ fn test_announce_disable_channels() {
                }
        }
        // Reconnect peers
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 3);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 3);
 
@@ -8186,7 +8186,7 @@ fn test_update_err_monitor_lockdown() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
                        assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
@@ -8280,7 +8280,7 @@ fn test_concurrent_monitor_claim() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Watchtower Alice should already have seen the block and reject the update
                        assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
                        assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
@@ -8736,9 +8736,9 @@ fn test_duplicate_chan_id() {
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
-       // At this point we'll try to add a duplicate channel monitor, which will be rejected, but
-       // still needs to be cleared here.
-       check_added_monitors!(nodes[1], 1);
+       // At this point we'll check whether the channel_id already exists and immediately fail the
+       // channel without trying to persist the `ChannelMonitor`.
+       check_added_monitors!(nodes[1], 0);
 
        // ...still, nodes[1] will reject the duplicate channel.
        {
@@ -8832,13 +8832,11 @@ fn test_error_chans_closed() {
                _ => panic!("Unexpected event"),
        }
        // Note that at this point users of a standard PeerHandler will end up calling
-       // peer_disconnected with no_connection_possible set to false, duplicating the
-       // close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
-       // users with their own peer handling logic. We duplicate the call here, however.
+       // peer_disconnected.
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
 }
@@ -8954,8 +8952,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
index e8b40c71d89e7f1825335bbe4a0abb0ed817970d..6e49a46f08a02a25065652e28cf3895afc031fe8 100644 (file)
@@ -993,21 +993,15 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
        fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures);
 
        // Connection loss/reestablish:
-       /// Indicates a connection to the peer failed/an existing connection was lost. If no connection
-       /// is believed to be possible in the future (eg they're sending us messages we don't
-       /// understand or indicate they require unknown feature bits), `no_connection_possible` is set
-       /// and any outstanding channels should be failed.
-       ///
-       /// Note that in some rare cases this may be called without a corresponding
-       /// [`Self::peer_connected`].
-       fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+       /// Indicates a connection to the peer failed/an existing connection was lost.
+       fn peer_disconnected(&self, their_node_id: &PublicKey);
 
        /// Handle a peer reconnecting, possibly generating `channel_reestablish` message(s).
        ///
        /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
        /// with us. Implementors should be somewhat conservative about doing so, however, as other
        /// message handlers may still wish to communicate with this peer.
-       fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init) -> Result<(), ()>;
+       fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>;
        /// Handle an incoming `channel_reestablish` message from the given peer.
        fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish);
 
@@ -1065,7 +1059,7 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
        /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
        /// with us. Implementors should be somewhat conservative about doing so, however, as other
        /// message handlers may still wish to communicate with this peer.
-       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>;
        /// Handles the reply of a query we initiated to learn about channels
        /// for a given range of blocks. We can expect to receive one or more
        /// replies to a single query.
@@ -1112,13 +1106,10 @@ pub trait OnionMessageHandler : OnionMessageProvider {
        /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
        /// with us. Implementors should be somewhat conservative about doing so, however, as other
        /// message handlers may still wish to communicate with this peer.
-       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init) -> Result<(), ()>;
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init, inbound: bool) -> Result<(), ()>;
        /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
        /// drop and refuse to forward onion messages to this peer.
-       ///
-       /// Note that in some rare cases this may be called without a corresponding
-       /// [`Self::peer_connected`].
-       fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+       fn peer_disconnected(&self, their_node_id: &PublicKey);
 
        // Handler information:
        /// Gets the node feature flags which this handler itself supports. All available handlers are
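The handler-facing change above is mechanical: `peer_connected` gains an `inbound` flag and `peer_disconnected` loses `no_connection_possible`. A minimal sketch of driving the updated `ChannelMessageHandler` hooks directly, as the test utilities above do; the handler values, node ids, features, and the connection direction are placeholders for illustration, not part of this patch:

    use bitcoin::secp256k1::PublicKey;
    use lightning::ln::features::InitFeatures;
    use lightning::ln::msgs::{self, ChannelMessageHandler};

    // Tell two directly-driven handlers about a connection between their peers. The new
    // boolean is the `inbound` flag, i.e. whether the connection is inbound from that
    // handler's point of view (chosen arbitrarily here, as in the test utilities above).
    fn notify_connected<H: ChannelMessageHandler>(
        a: &H, a_id: &PublicKey, a_features: InitFeatures,
        b: &H, b_id: &PublicKey, b_features: InitFeatures,
    ) -> Result<(), ()> {
        a.peer_connected(b_id, &msgs::Init { features: b_features, remote_network_address: None }, true)?;
        b.peer_connected(a_id, &msgs::Init { features: a_features, remote_network_address: None }, false)?;
        Ok(())
    }

    // Disconnection no longer carries `no_connection_possible`; whether channels must be
    // closed is no longer decided by this call.
    fn notify_disconnected<H: ChannelMessageHandler>(a: &H, a_id: &PublicKey, b: &H, b_id: &PublicKey) {
        a.peer_disconnected(b_id);
        b.peer_disconnected(a_id);
    }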
index bb69bbb32d51fd7ee95108c3c89ad06902fc58b7..da5aadb83065af7d6de274c30a021e4b9b05fc23 100644 (file)
@@ -576,8 +576,8 @@ fn test_onion_failure() {
        let short_channel_id = channels[1].0.contents.short_channel_id;
        run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {
                // disconnect event to the channel between nodes[1] ~ nodes[2]
-               nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-               nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+               nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        }, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
        reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
index 1d6eba5ade6507ee92c7ba7a3ddbd3f02dfd6200..86648e5fb724d39b4e027f2152ea002b32166f79 100644 (file)
@@ -253,12 +253,12 @@ fn no_pending_leak_on_initial_send_failure() {
 
        let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
                true, APIError::ChannelUnavailable { ref err },
-               assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));
+               assert_eq!(err, "Peer for first hop currently disconnected"));
 
        assert!(!nodes[0].node.has_pending_payments());
 }
@@ -310,8 +310,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment
        // to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
        // which would prevent retry.
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
@@ -340,13 +340,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(as_broadcasted_txn.len(), 1);
        assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -496,7 +496,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 
        reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
        // force-close the channel.
@@ -505,12 +505,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        assert!(nodes[0].node.has_pending_payments());
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -591,7 +591,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -615,7 +615,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // Check that after reload we can send the payment again (though we shouldn't, since it was
        // claimed previously).
        reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
@@ -660,8 +660,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
@@ -812,7 +812,7 @@ fn test_fulfill_restart_failure() {
        // Now reload nodes[1]...
        reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        nodes[1].node.fail_htlc_backwards(&payment_hash);
index 8c3ab968af20f92e3a544d7d1fea024837bd26e6..e9eaf33e8840b021afe7fbcfc60afec3e0fa5b3d 100644 (file)
@@ -79,7 +79,7 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
        fn get_next_channel_announcement(&self, _starting_point: u64) ->
                Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
        fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<msgs::NodeAnnouncement> { None }
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
        fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
        fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -95,8 +95,8 @@ impl OnionMessageProvider for IgnoringMessageHandler {
 }
 impl OnionMessageHandler for IgnoringMessageHandler {
        fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
-       fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
+       fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
                InitFeatures::empty()
@@ -230,8 +230,8 @@ impl ChannelMessageHandler for ErroringMessageHandler {
        }
        // msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
        fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
-       fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) -> Result<(), ()> { Ok(()) }
+       fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
        fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
        fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
        fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
@@ -322,16 +322,7 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
 /// descriptor.
 #[derive(Clone)]
-pub struct PeerHandleError {
-       /// Used to indicate that we probably can't make any future connections to this peer (e.g.
-       /// because we required features that our peer was missing, or vice versa).
-       ///
-       /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
-       /// any channels with this peer or check for new versions of LDK.
-       ///
-       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-       pub no_connection_possible: bool,
-}
+pub struct PeerHandleError { }
 impl fmt::Debug for PeerHandleError {
        fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
                formatter.write_str("Peer Sent Invalid Data")
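With the field gone, callers that previously branched on `no_connection_possible` treat any `PeerHandleError` as a dead connection. A minimal sketch, assuming the result comes from a call such as `PeerManager::read_event` (whose `Ok(bool)` is only a read back-pressure hint); the helper name is illustrative:

    use lightning::ln::peer_handler::PeerHandleError;

    // Returns true if the connection should be kept alive after processing a read.
    fn keep_connection(res: Result<bool, PeerHandleError>) -> bool {
        match res {
            // Back-pressure or not, the connection itself is fine.
            Ok(_pause_read) => true,
            // There is no `no_connection_possible` to inspect any more: tear down the
            // socket and make no further peer-handler calls for this descriptor.
            Err(PeerHandleError {}) => false,
        }
    }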
@@ -400,6 +391,12 @@ struct Peer {
        /// We cache a `NodeId` here to avoid serializing peers' keys every time we forward gossip
        /// messages in `PeerManager`. Use `Peer::set_their_node_id` to modify this field.
        their_node_id: Option<(PublicKey, NodeId)>,
+       /// The features provided in the peer's [`msgs::Init`] message.
+       ///
+       /// This is set only after we've processed the [`msgs::Init`] message and called relevant
+       /// `peer_connected` handler methods. Thus, this field is set *iff* we've finished our
+       /// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
+       /// check this).
        their_features: Option<InitFeatures>,
        their_net_address: Option<NetAddress>,
 
@@ -428,9 +425,18 @@ struct Peer {
        /// `channel_announcement` at all - we set this unconditionally but unset it every time we
        /// check if we're gossip-processing-backlogged).
        received_channel_announce_since_backlogged: bool,
+
+       inbound_connection: bool,
 }
 
 impl Peer {
+       /// True after we've processed the [`msgs::Init`] message and called relevant `peer_connected`
+       /// handler methods. Thus, this implies we've finished our handshake and can talk to this peer
+       /// normally.
+       fn handshake_complete(&self) -> bool {
+               self.their_features.is_some()
+       }
+
        /// Returns true if the channel announcements/updates for the given channel should be
        /// forwarded to this peer.
        /// If we are sending our routing table to this peer and we have not yet sent channel
@@ -438,6 +444,7 @@ impl Peer {
        /// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
        /// sent the old versions, we should send the update, and so return true here.
        fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
+               if !self.handshake_complete() { return false; }
                if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
                        !self.sent_gossip_timestamp_filter {
                                return false;
@@ -451,6 +458,7 @@ impl Peer {
 
        /// Similar to the above, but for node announcements indexed by node_id.
        fn should_forward_node_announcement(&self, node_id: NodeId) -> bool {
+               if !self.handshake_complete() { return false; }
                if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
                        !self.sent_gossip_timestamp_filter {
                                return false;
@@ -477,19 +485,20 @@ impl Peer {
        fn should_buffer_gossip_backfill(&self) -> bool {
                self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
                        && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+                       && self.handshake_complete()
        }
 
        /// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
        /// every time the peer's buffer may have been drained.
        fn should_buffer_onion_message(&self) -> bool {
-               self.pending_outbound_buffer.is_empty()
+               self.pending_outbound_buffer.is_empty() && self.handshake_complete()
                        && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
        }
 
        /// Determines if we should push additional gossip broadcast messages onto a peer's outbound
        /// buffer. This is checked every time the peer's buffer may have been drained.
        fn should_buffer_gossip_broadcast(&self) -> bool {
-               self.pending_outbound_buffer.is_empty()
+               self.pending_outbound_buffer.is_empty() && self.handshake_complete()
                        && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
        }
 
@@ -771,8 +780,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                let peers = self.peers.read().unwrap();
                peers.values().filter_map(|peer_mutex| {
                        let p = peer_mutex.lock().unwrap();
-                       if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() ||
-                               p.their_node_id.is_none() {
+                       if !p.handshake_complete() {
                                return None;
                        }
                        Some((p.their_node_id.unwrap().0, p.their_net_address.clone()))
@@ -830,6 +838,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        sent_gossip_timestamp_filter: false,
 
                        received_channel_announce_since_backlogged: false,
+                       inbound_connection: false,
                })).is_some() {
                        panic!("PeerManager driver duplicated descriptors!");
                };
@@ -879,6 +888,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        sent_gossip_timestamp_filter: false,
 
                        received_channel_announce_since_backlogged: false,
+                       inbound_connection: true,
                })).is_some() {
                        panic!("PeerManager driver duplicated descriptors!");
                };
@@ -1001,7 +1011,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // This is most likely a simple race condition where the user found that the socket
                                // was writeable, then we told the user to `disconnect_socket()`, then they called
                                // this method. Return an error to make sure we get disconnected.
-                               return Err(PeerHandleError { no_connection_possible: false });
+                               return Err(PeerHandleError { });
                        },
                        Some(peer_mutex) => {
                                let mut peer = peer_mutex.lock().unwrap();
@@ -1034,7 +1044,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        Ok(res) => Ok(res),
                        Err(e) => {
                                log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
-                               self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
+                               self.disconnect_event_internal(peer_descriptor);
                                Err(e)
                        }
                }
@@ -1067,7 +1077,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // This is most likely a simple race condition where the user read some bytes
                                // from the socket, then we told the user to `disconnect_socket()`, then they
                                // called this method. Return an error to make sure we get disconnected.
-                               return Err(PeerHandleError { no_connection_possible: false });
+                               return Err(PeerHandleError { });
                        },
                        Some(peer_mutex) => {
                                let mut read_pos = 0;
@@ -1081,7 +1091,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                msgs::ErrorAction::DisconnectPeer { msg: _ } => {
                                                                                        //TODO: Try to push msg
                                                                                        log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
-                                                                                       return Err(PeerHandleError{ no_connection_possible: false });
+                                                                                       return Err(PeerHandleError { });
                                                                                },
                                                                                msgs::ErrorAction::IgnoreAndLog(level) => {
                                                                                        log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
@@ -1134,7 +1144,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        hash_map::Entry::Occupied(_) => {
                                                                                log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
                                                                                peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
-                                                                               return Err(PeerHandleError{ no_connection_possible: false })
+                                                                               return Err(PeerHandleError { })
                                                                        },
                                                                        hash_map::Entry::Vacant(entry) => {
                                                                                log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1191,7 +1201,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
                                                                        peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
                                                                        if msg_len < 2 { // Need at least the message type tag
-                                                                               return Err(PeerHandleError{ no_connection_possible: false });
+                                                                               return Err(PeerHandleError { });
                                                                        }
                                                                        peer.pending_read_is_header = false;
                                                                } else {
@@ -1234,19 +1244,19 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                (msgs::DecodeError::UnknownRequiredFeature, ty) => {
                                                                                                        log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
-                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                                       return Err(PeerHandleError { });
                                                                                                }
-                                                                                               (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+                                                                                               (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
                                                                                                (msgs::DecodeError::InvalidValue, _) => {
                                                                                                        log_debug!(self.logger, "Got an invalid value while deserializing message");
-                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                                       return Err(PeerHandleError { });
                                                                                                }
                                                                                                (msgs::DecodeError::ShortRead, _) => {
                                                                                                        log_debug!(self.logger, "Deserialization failed due to shortness of message");
-                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                                       return Err(PeerHandleError { });
                                                                                                }
-                                                                                               (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
-                                                                                               (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
+                                                                                               (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
+                                                                                               (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
                                                                                        }
                                                                                }
                                                                        };
@@ -1298,10 +1308,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                if let wire::Message::Init(msg) = message {
                        if msg.features.requires_unknown_bits() {
                                log_debug!(self.logger, "Peer features required unknown version bits");
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
                        if peer_lock.their_features.is_some() {
-                               return Err(PeerHandleError{ no_connection_possible: false }.into());
+                               return Err(PeerHandleError { }.into());
                        }
 
                        log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
@@ -1311,24 +1321,24 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
                        }
 
-                       if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.route_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Route Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
-                       if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.chan_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Channel Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
-                       if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg) {
+                       if let Err(()) = self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
                                log_debug!(self.logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        }
 
                        peer_lock.their_features = Some(msg.features);
                        return Ok(None);
                } else if peer_lock.their_features.is_none() {
                        log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
-                       return Err(PeerHandleError{ no_connection_possible: false }.into());
+                       return Err(PeerHandleError { }.into());
                }
 
                if let wire::Message::GossipTimestampFilter(_msg) = message {
@@ -1380,7 +1390,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                }
                                self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
                                if msg.channel_id == [0; 32] {
-                                       return Err(PeerHandleError{ no_connection_possible: true }.into());
+                                       return Err(PeerHandleError { }.into());
                                }
                        },
                        wire::Message::Warning(msg) => {
@@ -1510,8 +1520,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        // Unknown messages:
                        wire::Message::Unknown(type_id) if message.is_even() => {
                                log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
-                               // Fail the channel if message is an even, unknown type as per BOLT #1.
-                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                               return Err(PeerHandleError { }.into());
                        },
                        wire::Message::Unknown(type_id) => {
                                log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
@@ -1531,10 +1540,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+                                       if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
                                        }
+                                       debug_assert!(peer.their_node_id.is_some());
+                                       debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1556,10 +1567,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+                                       if !peer.handshake_complete() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
                                        }
+                                       debug_assert!(peer.their_node_id.is_some());
+                                       debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1581,10 +1594,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                for (_, peer_mutex) in peers.iter() {
                                        let mut peer = peer_mutex.lock().unwrap();
-                                       if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
+                                       if !peer.handshake_complete() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
                                        }
+                                       debug_assert!(peer.their_node_id.is_some());
+                                       debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
                                        if peer.buffer_full_drop_gossip_broadcast() {
                                                log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
                                                continue;
@@ -1664,7 +1679,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        Some(descriptor) => match peers.get(&descriptor) {
                                                                Some(peer_mutex) => {
                                                                        let peer_lock = peer_mutex.lock().unwrap();
-                                                                       if peer_lock.their_features.is_none() {
+                                                                       if !peer_lock.handshake_complete() {
                                                                                continue;
                                                                        }
                                                                        peer_lock
@@ -1884,24 +1899,21 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                // thread can be holding the peer lock if we have the global write
                                // lock).
 
-                               if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+                               let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+                               if let Some(mut descriptor) = descriptor_opt {
                                        if let Some(peer_mutex) = peers.remove(&descriptor) {
+                                               let mut peer = peer_mutex.lock().unwrap();
                                                if let Some(msg) = msg {
                                                        log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                        log_pubkey!(node_id),
                                                                        msg.data);
-                                                       let mut peer = peer_mutex.lock().unwrap();
                                                        self.enqueue_message(&mut *peer, &msg);
                                                        // This isn't guaranteed to work, but if there is enough free
                                                        // room in the send buffer, put the error message there...
                                                        self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
-                                               } else {
-                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
                                                }
+                                               self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
                                        }
-                                       descriptor.disconnect_socket();
-                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
                                }
                        }
                }
@@ -1909,10 +1921,26 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Indicates that the given socket descriptor's connection is now closed.
        pub fn socket_disconnected(&self, descriptor: &Descriptor) {
-               self.disconnect_event_internal(descriptor, false);
+               self.disconnect_event_internal(descriptor);
        }
 
-       fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
+       fn do_disconnect(&self, mut descriptor: Descriptor, peer: &Peer, reason: &'static str) {
+               if !peer.handshake_complete() {
+                       log_trace!(self.logger, "Disconnecting peer which hasn't completed handshake due to {}", reason);
+                       descriptor.disconnect_socket();
+                       return;
+               }
+
+               debug_assert!(peer.their_node_id.is_some());
+               if let Some((node_id, _)) = peer.their_node_id {
+                       log_trace!(self.logger, "Disconnecting peer with id {} due to {}", node_id, reason);
+                       self.message_handler.chan_handler.peer_disconnected(&node_id);
+                       self.message_handler.onion_message_handler.peer_disconnected(&node_id);
+               }
+               descriptor.disconnect_socket();
+       }
+
+       fn disconnect_event_internal(&self, descriptor: &Descriptor) {
                let mut peers = self.peers.write().unwrap();
                let peer_option = peers.remove(descriptor);
                match peer_option {
@@ -1923,13 +1951,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        },
                        Some(peer_lock) => {
                                let peer = peer_lock.lock().unwrap();
+                               if !peer.handshake_complete() { return; }
+                               debug_assert!(peer.their_node_id.is_some());
                                if let Some((node_id, _)) = peer.their_node_id {
-                                       log_trace!(self.logger,
-                                               "Handling disconnection of peer {}, with {}future connection to the peer possible.",
-                                               log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
+                                       log_trace!(self.logger, "Handling disconnection of peer {}", log_pubkey!(node_id));
                                        self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
-                                       self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
-                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
+                                       self.message_handler.chan_handler.peer_disconnected(&node_id);
+                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id);
                                }
                        }
                };
@@ -1937,21 +1965,17 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
        /// Disconnect a peer given its node id.
        ///
-       /// Set `no_connection_possible` to true to prevent any further connection with this peer,
-       /// force-closing any channels we have with it.
-       ///
        /// If a peer is connected, this will call [`disconnect_socket`] on the descriptor for the
        /// peer. Thus, be very careful about reentrancy issues.
        ///
        /// [`disconnect_socket`]: SocketDescriptor::disconnect_socket
-       pub fn disconnect_by_node_id(&self, node_id: PublicKey, no_connection_possible: bool) {
+       pub fn disconnect_by_node_id(&self, node_id: PublicKey) {
                let mut peers_lock = self.peers.write().unwrap();
-               if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
-                       log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
-                       peers_lock.remove(&descriptor);
-                       self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
-                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
-                       descriptor.disconnect_socket();
+               if let Some(descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+                       let peer_opt = peers_lock.remove(&descriptor);
+                       if let Some(peer_mutex) = peer_opt {
+                               self.do_disconnect(descriptor, &*peer_mutex.lock().unwrap(), "client request");
+                       } else { debug_assert!(false, "node_id_to_descriptor thought we had a peer"); }
                }
        }
 
@@ -1962,13 +1986,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                let mut peers_lock = self.peers.write().unwrap();
                self.node_id_to_descriptor.lock().unwrap().clear();
                let peers = &mut *peers_lock;
-               for (mut descriptor, peer) in peers.drain() {
-                       if let Some((node_id, _)) = peer.lock().unwrap().their_node_id {
-                               log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
-                               self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                               self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
-                       }
-                       descriptor.disconnect_socket();
+               for (descriptor, peer_mutex) in peers.drain() {
+                       self.do_disconnect(descriptor, &*peer_mutex.lock().unwrap(), "client request to disconnect all peers");
                }
        }
 
@@ -2009,7 +2028,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                let mut peer = peer_mutex.lock().unwrap();
                                if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
 
-                               if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
+                               if !peer.handshake_complete() {
                                        // The peer needs to complete its handshake before we can exchange messages. We
                                        // give peers one timer tick to complete the handshake, reusing
                                        // `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken
@@ -2021,6 +2040,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                        }
                                        continue;
                                }
+                               debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+                               debug_assert!(peer.their_node_id.is_some());
 
                                loop { // Used as a `goto` to skip writing a Ping message.
                                        if peer.awaiting_pong_timer_tick_intervals == -1 {
@@ -2059,21 +2080,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                if !descriptors_needing_disconnect.is_empty() {
                        {
                                let mut peers_lock = self.peers.write().unwrap();
-                               for descriptor in descriptors_needing_disconnect.iter() {
-                                       if let Some(peer) = peers_lock.remove(descriptor) {
-                                               if let Some((node_id, _)) = peer.lock().unwrap().their_node_id {
-                                                       log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
+                               for descriptor in descriptors_needing_disconnect {
+                                       if let Some(peer_mutex) = peers_lock.remove(&descriptor) {
+                                               let peer = peer_mutex.lock().unwrap();
+                                               if let Some((node_id, _)) = peer.their_node_id {
                                                        self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
-                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                                                       self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
                                                }
+                                               self.do_disconnect(descriptor, &*peer, "ping timeout");
                                        }
                                }
                        }
-
-                       for mut descriptor in descriptors_needing_disconnect.drain(..) {
-                               descriptor.disconnect_socket();
-                       }
                }
        }
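The ping-timeout path now consumes `descriptors_needing_disconnect` by value, so each descriptor is owned inside the loop and can be torn down by `do_disconnect` in the same pass that notifies the handlers; the separate `.drain(..)` loop that used to call `disconnect_socket` afterwards goes away. A tiny illustration of that ownership change, with a stand-in type rather than the real `SocketDescriptor`:

```rust
// Stand-in for a socket descriptor; disconnecting needs mutable/owned access.
struct FakeDescriptor(u32);
impl FakeDescriptor {
    fn disconnect_socket(&mut self) { /* close the underlying socket */ }
}

fn disconnect_all(descriptors: Vec<FakeDescriptor>) {
    // Iterating by value yields owned descriptors, so each can be closed here;
    // iterating by reference (as before) forced a second pass over the Vec.
    for mut descriptor in descriptors {
        descriptor.disconnect_socket();
    }
}
```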
 
@@ -2161,6 +2177,7 @@ fn is_gossip_msg(type_id: u16) -> bool {
 #[cfg(test)]
 mod tests {
        use crate::chain::keysinterface::{NodeSigner, Recipient};
+       use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
        use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
        use crate::ln::{msgs, wire};
        use crate::ln::msgs::NetAddress;
@@ -2269,19 +2286,15 @@ mod tests {
                // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
                // pushes a DisconnectPeer event to remove the node flagged by id
                let cfgs = create_peermgr_cfgs(2);
-               let chan_handler = test_utils::TestChannelMessageHandler::new();
-               let mut peers = create_network(2, &cfgs);
+               let peers = create_network(2, &cfgs);
                establish_connection(&peers[0], &peers[1]);
                assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
                let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap();
-
-               chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
+               cfgs[0].chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
                        node_id: their_id,
                        action: msgs::ErrorAction::DisconnectPeer { msg: None },
                });
-               assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1);
-               peers[0].message_handler.chan_handler = &chan_handler;
 
                peers[0].process_events();
                assert_eq!(peers[0].peers.read().unwrap().len(), 0);
@@ -2315,6 +2328,35 @@ mod tests {
                assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
        }
 
+       #[test]
+       fn test_non_init_first_msg() {
+               // Simple test of the first message received over a connection being something other than
+               // Init. This results in an immediate disconnection, which previously included a spurious
+               // peer_disconnected event handed to event handlers (which would panic in
+               // `TestChannelMessageHandler` here).
+               let cfgs = create_peermgr_cfgs(2);
+               let peers = create_network(2, &cfgs);
+
+               let mut fd_dup = FileDescriptor { fd: 3, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+               let addr_dup = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1003};
+               let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap();
+               peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap();
+
+               let mut dup_encryptor = PeerChannelEncryptor::new_outbound(id_a, SecretKey::from_slice(&[42; 32]).unwrap());
+               let initial_data = dup_encryptor.get_act_one(&peers[1].secp_ctx);
+               assert_eq!(peers[0].read_event(&mut fd_dup, &initial_data).unwrap(), false);
+               peers[0].process_events();
+
+               let a_data = fd_dup.outbound_data.lock().unwrap().split_off(0);
+               let (act_three, _) =
+                       dup_encryptor.process_act_two(&a_data[..], &&cfgs[1].node_signer).unwrap();
+               assert_eq!(peers[0].read_event(&mut fd_dup, &act_three).unwrap(), false);
+
+               let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 };
+               let msg_bytes = dup_encryptor.encrypt_message(&not_init_msg);
+               assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err());
+       }
+
        #[test]
        fn test_disconnect_all_peer() {
                // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
index adc2b59abbf4b3180ab6650e5433305b1fb112ef..7636f5c63641edc5e18cb5d7f69d3375a9fa235b 100644 (file)
@@ -90,8 +90,8 @@ fn test_priv_forwarding_rejection() {
        // Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
        // to true. Sadly there is currently no way to change it at runtime.
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        let nodes_1_serialized = nodes[1].node.encode();
        let monitor_a_serialized = get_monitor!(nodes[1], chan_id_1).encode();
@@ -100,8 +100,8 @@ fn test_priv_forwarding_rejection() {
        no_announce_cfg.accept_forwards_to_priv_channels = true;
        reload_node!(nodes[1], no_announce_cfg, &nodes_1_serialized, &[&monitor_a_serialized, &monitor_b_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
@@ -109,8 +109,8 @@ fn test_priv_forwarding_rejection() {
        get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 
-       nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[2]).pop().unwrap();
        let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap();
        nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
index 8e5056dc68e45750e4bc354df973b86d403720ae..14deefd3f93828181f6cfad952d48913c8e25586 100644 (file)
@@ -44,8 +44,8 @@ fn test_funding_peer_disconnect() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        confirm_transaction(&nodes[0], &tx);
        let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
@@ -53,16 +53,16 @@ fn test_funding_peer_disconnect() {
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        confirm_transaction(&nodes[1], &tx);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert!(events_2.is_empty());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
@@ -169,7 +169,7 @@ fn test_funding_peer_disconnect() {
 
        // Check that after deserialization and reconnection we can still generate an identical
        // channel_announcement from the cached signatures.
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 
@@ -190,15 +190,15 @@ fn test_no_txn_manager_serialize_deserialize() {
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized =
                get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -267,7 +267,7 @@ fn test_manager_serialize_deserialize_events() {
        let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // After deserializing, make sure the funding_transaction is still held by the channel manager
        let events_4 = nodes[0].node.get_and_clear_pending_events();
@@ -278,9 +278,9 @@ fn test_manager_serialize_deserialize_events() {
        // Make sure the channel is functioning as though the de/serialization never happened
        assert_eq!(nodes[0].node.list_channels().len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -313,7 +313,7 @@ fn test_simple_manager_serialize_deserialize() {
        let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
        reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
@@ -353,9 +353,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let nodes_0_serialized = nodes[0].node.encode();
 
        route_payment(&nodes[0], &[&nodes[3]], 1000000);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
        // nodes[3])
@@ -443,9 +443,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        //... and we can even still claim the payment!
        claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
-       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
        let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
-       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
        let mut found_err = false;
        for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
@@ -488,14 +488,14 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
 
        if reconnect_panicing {
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
@@ -543,8 +543,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
        // after the warning message sent by B, we should not be able to
        // use the channel or successfully reconnect to it.
        assert!(nodes[0].node.list_usable_channels().is_empty());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
@@ -627,8 +627,8 @@ fn test_forwardable_regen() {
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
        let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
@@ -753,8 +753,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
        assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
 
-       nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
 
        // During deserialization, we should have closed one channel and broadcast its latest
        // commitment transaction. We should also still have the original PaymentClaimable event we
@@ -779,9 +779,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
        if !persist_both_monitors {
                // If one of the two channels is still live, reveal the payment preimage over it.
 
-               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: nodes[2].node.init_features(), remote_network_address: None }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
-               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: nodes[3].node.init_features(), remote_network_address: None }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
 
                nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
@@ -923,7 +923,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
        let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(bs_commitment_tx.len(), 1);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        if use_cs_commitment {
@@ -1038,7 +1038,7 @@ fn removed_payment_no_manager_persistence() {
        // Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
        // now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
        // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
index ce1f3b64750bafbd7343ff66f47e8b16677d30a4..69e0c9afa9e49c056c7d081ef8e42913d7492d55 100644 (file)
@@ -378,7 +378,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                // If we dropped the channel before reloading the node, nodes[1] was also dropped from
                // nodes[0] storage, and hence not connected again on startup. We therefore need to
                // reconnect to the node before attempting to create a new channel.
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        }
        create_announced_chan_between_nodes(&nodes, 0, 1);
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
index 5a5f054846f8d4e66d720aaa3bc77a29b1b7da6e..75eacb74bc2fe7352a99288e28fcef82eac4c81d 100644 (file)
@@ -243,12 +243,12 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                }
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let node_1_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
@@ -305,12 +305,12 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                assert!(node_0_2nd_closing_signed.is_some());
        }
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
        let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, false).unwrap();
        if recv_count == 0 {
                // If all closing_signeds weren't delivered we can just resume where we left off...
                let node_0_2nd_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
index 1309baf60eac7ed85a5493ec8ca102653cad6eb6..aee7f562defc5915517d696cf032353deee4e3d6 100644 (file)
@@ -85,8 +85,8 @@ fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
                let mut features = InitFeatures::empty();
                features.set_onion_messages_optional();
                let init_msg = msgs::Init { features, remote_network_address: None };
-               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone()).unwrap();
-               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone()).unwrap();
+               nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone(), true).unwrap();
+               nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone(), false).unwrap();
        }
        nodes
 }
index f7656e7949aa9df945010eadee9b0ba30eefb861..7814ccb6508c77a10abfa32ee189914493a62278 100644 (file)
@@ -417,7 +417,7 @@ impl<ES: Deref, NS: Deref, L: Deref, CMH: Deref> OnionMessageHandler for OnionMe
                };
        }
 
-       fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                if init.features.supports_onion_messages() {
                        let mut peers = self.pending_messages.lock().unwrap();
                        peers.insert(their_node_id.clone(), VecDeque::new());
@@ -425,7 +425,7 @@ impl<ES: Deref, NS: Deref, L: Deref, CMH: Deref> OnionMessageHandler for OnionMe
                Ok(())
        }
 
-       fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
+       fn peer_disconnected(&self, their_node_id: &PublicKey) {
                let mut pending_msgs = self.pending_messages.lock().unwrap();
                pending_msgs.remove(their_node_id);
        }
index 6c2d70bd6f309ed429af45876666673fc6421a35..cf51b6ab52827943626ede2cbc5d0e87e1f95366 100644 (file)
@@ -390,7 +390,7 @@ where U::Target: UtxoLookup, L::Target: Logger
        }
 
        fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
-               let channels = self.network_graph.channels.read().unwrap();
+               let mut channels = self.network_graph.channels.write().unwrap();
                for (_, ref chan) in channels.range(starting_point..) {
                        if chan.announcement_message.is_some() {
                                let chan_announcement = chan.announcement_message.clone().unwrap();
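The switch from `.read()` to `.write()` here (and in the other two lookups below) follows from the `IndexedMap` rework later in this commit: `range` now takes `&mut self` because it lazily sorts its key list, so a read guard no longer grants enough access. A minimal standalone sketch of the same constraint, assuming the `lightning` crate is available as a dependency and `IndexedMap` is reachable at `lightning::util::indexed_map::IndexedMap`:

```rust
use std::sync::RwLock;
use lightning::util::indexed_map::IndexedMap;

// Returns the first key at or above `start`. A write guard is required because
// `range(..)` needs `&mut IndexedMap` to sort its keys before binary-searching.
fn first_key_at_or_above(map: &RwLock<IndexedMap<u64, String>>, start: u64) -> Option<u64> {
    let mut guard = map.write().unwrap(); // a read() guard would only give &IndexedMap
    guard.range(start..).next().map(|(k, _)| *k)
}
```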
@@ -412,7 +412,7 @@ where U::Target: UtxoLookup, L::Target: Logger
        }
 
        fn get_next_node_announcement(&self, starting_point: Option<&NodeId>) -> Option<NodeAnnouncement> {
-               let nodes = self.network_graph.nodes.read().unwrap();
+               let mut nodes = self.network_graph.nodes.write().unwrap();
                let iter = if let Some(node_id) = starting_point {
                                nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
                        } else {
@@ -437,7 +437,7 @@ where U::Target: UtxoLookup, L::Target: Logger
        /// to request gossip messages for each channel. The sync is considered complete
        /// when the final reply_scids_end message is received, though we are not
        /// tracking this directly.
-       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init, _inbound: bool) -> Result<(), ()> {
                // We will only perform a sync with peers that support gossip_queries.
                if !init_msg.features.supports_gossip_queries() {
                        // Don't disconnect peers for not supporting gossip queries. We may wish to have
@@ -572,7 +572,7 @@ where U::Target: UtxoLookup, L::Target: Logger
                // (has at least one update). A peer may still want to know the channel
                // exists even if it's not yet routable.
                let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
-               let channels = self.network_graph.channels.read().unwrap();
+               let mut channels = self.network_graph.channels.write().unwrap();
                for (_, ref chan) in channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap()) {
                        if let Some(chan_announcement) = &chan.announcement_message {
                                // Construct a new batch if last one is full
@@ -2791,7 +2791,7 @@ pub(crate) mod tests {
                // It should ignore if gossip_queries feature is not enabled
                {
                        let init_msg = Init { features: InitFeatures::empty(), remote_network_address: None };
-                       gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap();
+                       gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap();
                        let events = gossip_sync.get_and_clear_pending_msg_events();
                        assert_eq!(events.len(), 0);
                }
@@ -2801,7 +2801,7 @@ pub(crate) mod tests {
                        let mut features = InitFeatures::empty();
                        features.set_gossip_queries_optional();
                        let init_msg = Init { features, remote_network_address: None };
-                       gossip_sync.peer_connected(&node_id_1, &init_msg).unwrap();
+                       gossip_sync.peer_connected(&node_id_1, &init_msg, true).unwrap();
                        let events = gossip_sync.get_and_clear_pending_msg_events();
                        assert_eq!(events.len(), 1);
                        match &events[0] {
index cccbfe7bc7a43434a12b8c7608dff2d935e5c1dc..3d45172517db3e706dcbf4884dd75923c872c8a5 100644 (file)
@@ -1,10 +1,11 @@
 //! This module has a map which can be iterated in a deterministic order. See the [`IndexedMap`].
 
 use crate::prelude::{HashMap, hash_map};
-use alloc::collections::{BTreeSet, btree_set};
+use alloc::vec::Vec;
+use alloc::slice::Iter;
 use core::hash::Hash;
 use core::cmp::Ord;
-use core::ops::RangeBounds;
+use core::ops::{Bound, RangeBounds};
 
 /// A map which can be iterated in a deterministic order.
 ///
@@ -21,11 +22,10 @@ use core::ops::RangeBounds;
 /// keys in the order defined by [`Ord`].
 ///
 /// [`BTreeMap`]: alloc::collections::BTreeMap
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, Eq)]
 pub struct IndexedMap<K: Hash + Ord, V> {
        map: HashMap<K, V>,
-       // TODO: Explore swapping this for a sorted vec (that is only sorted on first range() call)
-       keys: BTreeSet<K>,
+       keys: Vec<K>,
 }
 
 impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
@@ -33,7 +33,7 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
        pub fn new() -> Self {
                Self {
                        map: HashMap::new(),
-                       keys: BTreeSet::new(),
+                       keys: Vec::new(),
                }
        }
 
@@ -58,7 +58,8 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
        pub fn remove(&mut self, key: &K) -> Option<V> {
                let ret = self.map.remove(key);
                if let Some(_) = ret {
-                       assert!(self.keys.remove(key), "map and keys must be consistent");
+                       let idx = self.keys.iter().position(|k| k == key).expect("map and keys must be consistent");
+                       self.keys.remove(idx);
                }
                ret
        }
@@ -68,7 +69,7 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
        pub fn insert(&mut self, key: K, value: V) -> Option<V> {
                let ret = self.map.insert(key.clone(), value);
                if ret.is_none() {
-                       assert!(self.keys.insert(key), "map and keys must be consistent");
+                       self.keys.push(key);
                }
                ret
        }
@@ -109,9 +110,21 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
        }
 
        /// Returns an iterator which iterates over the `key`/`value` pairs in a given range.
-       pub fn range<R: RangeBounds<K>>(&self, range: R) -> Range<K, V> {
+       pub fn range<R: RangeBounds<K>>(&mut self, range: R) -> Range<K, V> {
+               self.keys.sort_unstable();
+               let start = match range.start_bound() {
+                       Bound::Unbounded => 0,
+                       Bound::Included(key) => self.keys.binary_search(key).unwrap_or_else(|index| index),
+                       Bound::Excluded(key) => self.keys.binary_search(key).and_then(|index| Ok(index + 1)).unwrap_or_else(|index| index),
+               };
+               let end = match range.end_bound() {
+                       Bound::Unbounded => self.keys.len(),
+                       Bound::Included(key) => self.keys.binary_search(key).and_then(|index| Ok(index + 1)).unwrap_or_else(|index| index),
+                       Bound::Excluded(key) => self.keys.binary_search(key).unwrap_or_else(|index| index),
+               };
+
                Range {
-                       inner_range: self.keys.range(range),
+                       inner_range: self.keys[start..end].iter(),
                        map: &self.map,
                }
        }
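A minimal usage sketch of the Vec-backed `range` (types and methods as in this file; the surrounding Cargo setup with the `lightning` crate as a dependency is assumed): keys are appended unsorted on `insert` and only sorted when `range` is called, which is why the method now takes `&mut self`, and why `remove` pays a linear scan via `position`.

```rust
use lightning::util::indexed_map::IndexedMap;

fn main() {
    let mut map: IndexedMap<u64, &str> = IndexedMap::new();
    // Insertion order is deliberately unsorted; `insert` just pushes the key.
    map.insert(3, "c");
    map.insert(1, "a");
    map.insert(2, "b");

    // `range` sorts the key Vec, binary-searches the bounds, then walks the slice.
    let in_range: Vec<(u64, &str)> = map.range(1..3).map(|(k, v)| (*k, *v)).collect();
    assert_eq!(in_range, vec![(1, "a"), (2, "b")]);

    // Removal keeps the key Vec consistent with the map (linear scan for the key).
    assert_eq!(map.remove(&2), Some("b"));
    assert_eq!(map.range(..).count(), 2);
}
```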
@@ -127,9 +140,15 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> {
        }
 }
 
+impl<K: Hash + Ord + PartialEq, V: PartialEq> PartialEq for IndexedMap<K, V> {
+       fn eq(&self, other: &Self) -> bool {
+               self.map == other.map
+       }
+}
+
 /// An iterator over a range of values in an [`IndexedMap`]
 pub struct Range<'a, K: Hash + Ord, V> {
-       inner_range: btree_set::Range<'a, K>,
+       inner_range: Iter<'a, K>,
        map: &'a HashMap<K, V>,
 }
 impl<'a, K: Hash + Ord, V: 'a> Iterator for Range<'a, K, V> {
@@ -148,7 +167,7 @@ pub struct VacantEntry<'a, K: Hash + Ord, V> {
        #[cfg(not(feature = "hashbrown"))]
        underlying_entry: hash_map::VacantEntry<'a, K, V>,
        key: K,
-       keys: &'a mut BTreeSet<K>,
+       keys: &'a mut Vec<K>,
 }
 
 /// An [`Entry`] for an existing key-value pair
@@ -157,7 +176,7 @@ pub struct OccupiedEntry<'a, K: Hash + Ord, V> {
        underlying_entry: hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>,
        #[cfg(not(feature = "hashbrown"))]
        underlying_entry: hash_map::OccupiedEntry<'a, K, V>,
-       keys: &'a mut BTreeSet<K>,
+       keys: &'a mut Vec<K>,
 }
 
 /// A mutable reference to a position in the map. This can be used to reference, add, or update the
@@ -172,7 +191,7 @@ pub enum Entry<'a, K: Hash + Ord, V> {
 impl<'a, K: Hash + Ord, V> VacantEntry<'a, K, V> {
        /// Insert a value into the position described by this entry.
        pub fn insert(self, value: V) -> &'a mut V {
-               assert!(self.keys.insert(self.key), "map and keys must be consistent");
+               self.keys.push(self.key);
                self.underlying_entry.insert(value)
        }
 }
@@ -181,7 +200,8 @@ impl<'a, K: Hash + Ord, V> OccupiedEntry<'a, K, V> {
        /// Remove the value at the position described by this entry.
        pub fn remove_entry(self) -> (K, V) {
                let res = self.underlying_entry.remove_entry();
-               assert!(self.keys.remove(&res.0), "map and keys must be consistent");
+               let idx = self.keys.iter().position(|k| k == &res.0).expect("map and keys must be consistent");
+               self.keys.remove(idx);
                res
        }
 
index 928cc61946e46f358a70e82672b06bda1dec4867..847005e324e60f5a0e2e42d67e31009571b27e2b 100644 (file)
@@ -770,6 +770,7 @@ impl Readable for Vec<u8> {
 }
 
 impl_for_vec!(ecdsa::Signature);
+impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction);
 impl_for_vec!((A, B), A, B);
 
 impl Writeable for Script {
index b2b5cacfe97e1e25d0f1e3dc7a6463b771fdfcea..344f5b4c62d01736afd85654064735c68f0456bd 100644 (file)
@@ -331,6 +331,7 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster {
 pub struct TestChannelMessageHandler {
        pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
        expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
+       connected_peers: Mutex<HashSet<PublicKey>>,
 }
 
 impl TestChannelMessageHandler {
@@ -338,6 +339,7 @@ impl TestChannelMessageHandler {
                TestChannelMessageHandler {
                        pending_events: Mutex::new(Vec::new()),
                        expected_recv_msgs: Mutex::new(None),
+                       connected_peers: Mutex::new(HashSet::new()),
                }
        }
 
@@ -422,8 +424,11 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
        fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
                self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
        }
-       fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) -> Result<(), ()> {
+       fn peer_disconnected(&self, their_node_id: &PublicKey) {
+               assert!(self.connected_peers.lock().unwrap().remove(their_node_id));
+       }
+       fn peer_connected(&self, their_node_id: &PublicKey, _msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
+               assert!(self.connected_peers.lock().unwrap().insert(their_node_id.clone()));
                // Don't bother with `received_msg` for Init as it's auto-generated and we don't want to
                // bother re-generating the expected Init message in all tests.
                Ok(())
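`TestChannelMessageHandler` now tracks connected peers so that duplicate or unbalanced `peer_connected`/`peer_disconnected` calls fail loudly in tests; this is exactly the invariant the spurious-disconnect fix in peer_handler.rs protects. A self-contained sketch of the same assertion pattern, with a plain integer id standing in for `PublicKey`:

```rust
use std::collections::HashSet;
use std::sync::Mutex;

// Illustrative stand-in for the new `connected_peers` bookkeeping; u64 replaces
// secp256k1::PublicKey purely to keep the sketch self-contained.
struct ConnectionTracker {
    connected: Mutex<HashSet<u64>>,
}

impl ConnectionTracker {
    fn peer_connected(&self, id: u64) {
        // Panics if the same peer is reported as connecting twice in a row.
        assert!(self.connected.lock().unwrap().insert(id));
    }
    fn peer_disconnected(&self, id: u64) {
        // Panics on a disconnect for a peer we never saw connect.
        assert!(self.connected.lock().unwrap().remove(&id));
    }
}
```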
@@ -539,7 +544,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                None
        }
 
-       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
                if !init_msg.features.supports_gossip_queries() {
                        return Ok(());
                }