Merge pull request #2999 from keyuebao/keyue/update-ldk-docs
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index dc7f108449decd336d388ca785c6d18a777393ab..11ab1fbb85158a40f6e796ab1b26856df16c8458 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -21,7 +21,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;
@@ -37,43 +37,6 @@ use bitcoin::hashes::Hash;
 use crate::prelude::*;
 use crate::sync::{Arc, Mutex};
 
-#[test]
-fn test_simple_monitor_permanent_update_fail() {
-       // Test that we handle a simple permanent monitor update failure
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1);
-
-       let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
-       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-       unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
-                       RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
-               ), true, APIError::ChannelUnavailable {..}, {});
-       check_added_monitors!(nodes[0], 2);
-
-       let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events_1.len(), 2);
-       match events_1[0] {
-               MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-               _ => panic!("Unexpected event"),
-       };
-       match events_1[1] {
-               MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
-               _ => panic!("Unexpected event"),
-       };
-
-       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
-
-       // TODO: Once we hit the chain with the failure transaction we should check that we get a
-       // PaymentPathFailed event
-
-       assert_eq!(nodes[0].node.list_channels().len(), 0);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
-               [nodes[1].node.get_our_node_id()], 100000);
-}
-
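The test removed above exercised `ChannelMonitorUpdateStatus::PermanentFailure`, which no longer exists: a persister now reports synchronous success, asynchronous completion, or an unrecoverable error. A minimal sketch of mapping onto the surviving variants, assuming the post-removal enum shape:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

// Sketch only: how a custom persister might map an IO result onto the
// statuses that remain now that PermanentFailure is gone.
fn status_for(io_result: std::io::Result<()>) -> ChannelMonitorUpdateStatus {
    match io_result {
        // Durably written; normal channel operation continues.
        Ok(()) => ChannelMonitorUpdateStatus::Completed,
        // Will finish later; the channel pauses until the application
        // reports completion back to the ChainMonitor.
        Err(e) if e.kind() == std::io::ErrorKind::WouldBlock =>
            ChannelMonitorUpdateStatus::InProgress,
        // Storage is gone for good; LDK treats this as fatal.
        Err(_) => ChannelMonitorUpdateStatus::UnrecoverableError,
    }
}
```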
 #[test]
 fn test_monitor_and_persister_update_fail() {
        // Test that if both updating the `ChannelMonitor` and persisting the updated
@@ -117,14 +80,11 @@ fn test_monitor_and_persister_update_fail() {
                        new_monitor
                };
                let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
+               assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                chain_mon
        };
        chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
 
-       // Set the persister's return value to be a InProgress.
-       persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-
        // Try to update ChannelMonitor
        nodes[1].node.claim_funds(preimage);
        expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
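Note the assertion change above: `watch_channel` now returns a `Result`, so a monitor that cannot be accepted surfaces as an `Err` rather than as a failure status. A hedged sketch of consuming the new return type:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

// Sketch only: `res` stands in for what `Watch::watch_channel` now returns.
fn on_watch_result(res: Result<ChannelMonitorUpdateStatus, ()>) {
    match res {
        // Monitor accepted and persisted synchronously.
        Ok(ChannelMonitorUpdateStatus::Completed) => {},
        // Accepted, but the initial persist is still pending.
        Ok(ChannelMonitorUpdateStatus::InProgress) => {},
        // UnrecoverableError, or the monitor was rejected outright.
        _ => panic!("cannot safely continue without a persisted monitor"),
    }
}
```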
@@ -133,17 +93,21 @@ fn test_monitor_and_persister_update_fail() {
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
                        if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                               // Check that even though the persister is returning a InProgress,
-                               // because the update is bogus, ultimately the error that's returned
-                               // should be a PermanentFailure.
-                               if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-                               logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
-                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                               // Check that the persister returns InProgress (and will never actually complete)
+                               // as the monitor update errors.
+                               if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor paused"); }
+                               logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1);
+
+                               // Apply the monitor update to the original ChainMonitor, ensuring the
+                               // ChannelManager and ChannelMonitor aren't out of sync.
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update),
+                                       ChannelMonitorUpdateStatus::Completed);
                        } else { assert!(false); }
                } else {
                        assert!(false);
@@ -151,8 +115,7 @@ fn test_monitor_and_persister_update_fail() {
        }
 
        check_added_monitors!(nodes[0], 1);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
+       expect_payment_sent(&nodes[0], preimage, None, false, false);
 }
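The rewritten body reflects the same API shift: an update that fails to apply now leaves the monitor paused with `InProgress` rather than returning `PermanentFailure`, and the test closes by asserting a real `PaymentSent` instead of merely counting events. In isolation:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

// Sketch only: what the new behavior means for a caller of update_channel
// when the update fails to apply.
fn on_update_status(status: ChannelMonitorUpdateStatus) {
    if status == ChannelMonitorUpdateStatus::InProgress {
        // The channel is paused: no commitment updates are signed or sent
        // until completion is reported. For an update that failed to apply,
        // completion never comes, so the channel stays frozen rather than
        // being force-closed out from under the ChannelManager.
    }
}
```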
 
 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
@@ -210,11 +173,11 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
-                               PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+                               PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(payment_secret_1, *payment_secret);
                                },
-                               _ => panic!("expected PaymentPurpose::InvoicePayment")
+                               _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
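`PaymentPurpose::InvoicePayment` is renamed to `Bolt11InvoicePayment` throughout this file, presumably to leave room for BOLT 12 payment purposes as separate variants. A hedged sketch of matching on the renamed variant when handling `Event::PaymentClaimable`; everything beyond the two variants shown is an assumption about the same release:

```rust
use lightning::events::PaymentPurpose;

// Sketch only: classify a claimable payment by purpose.
fn describe(purpose: &PaymentPurpose) -> &'static str {
    match purpose {
        PaymentPurpose::Bolt11InvoicePayment { .. } => "BOLT 11 invoice payment",
        PaymentPurpose::SpontaneousPayment(_) => "spontaneous (keysend) payment",
        // Assumed: the release also adds distinct BOLT 12 variants.
        _ => "other (e.g. BOLT 12)",
    }
}
```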
@@ -591,11 +554,11 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
-                               PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+                               PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(payment_secret_2, *payment_secret);
                                },
-                               _ => panic!("expected PaymentPurpose::InvoicePayment")
+                               _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
@@ -709,11 +672,11 @@ fn test_monitor_update_fail_cs() {
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
-                               PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+                               PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(our_payment_secret, *payment_secret);
                                },
-                               _ => panic!("expected PaymentPurpose::InvoicePayment")
+                               _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
@@ -1720,11 +1683,11 @@ fn test_monitor_update_fail_claim() {
                        assert_eq!(via_channel_id, Some(channel_id));
                        assert_eq!(via_user_channel_id, Some(42));
                        match &purpose {
-                               PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+                               PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(payment_secret_2, *payment_secret);
                                },
-                               _ => panic!("expected PaymentPurpose::InvoicePayment")
+                               _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
@@ -1736,11 +1699,11 @@ fn test_monitor_update_fail_claim() {
                        assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
-                               PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+                               PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(payment_secret_3, *payment_secret);
                                },
-                               _ => panic!("expected PaymentPurpose::InvoicePayment")
+                               _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
@@ -1887,7 +1850,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
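`create_channel` also takes one more argument in this release. Judging only from the call sites in this diff, the new `None` slots in ahead of the config override; it appears to be an optional caller-chosen temporary channel id, though that name is an assumption rather than something this diff states. A sketch, with `node_a` and `node_b_id` as stand-ins:

```rust
// Sketch only: the last two Options are assumed to be
// (temporary_channel_id, config_override).
let _temporary_channel_id = node_a.create_channel(
    node_b_id, // counterparty node id
    100_000,   // channel value, satoshis
    10_001,    // push amount, millisatoshis
    43,        // user_channel_id
    None,      // temporary channel id: let LDK pick a random one
    None,      // use the node's default UserConfig
).unwrap();
```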
 
@@ -1898,7 +1861,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-       let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
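Deriving a channel id from a funding outpoint moves from a method on `OutPoint` to a constructor on the dedicated `ChannelId` type, matching the new import at the top of the file. For v1 channels the id is, per BOLT 2, the big-endian funding txid XORed with the funding output index in its lowest two bytes:

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Sketch only: the replacement for the old OutPoint::to_channel_id().
fn v1_channel_id(funding: OutPoint) -> ChannelId {
    ChannelId::v1_from_funding_outpoint(funding)
}
```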
 
@@ -1992,8 +1955,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
        send_payment(&nodes[0], &[&nodes[1]], 8000000);
        close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
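`ClosureReason::CooperativeClosure` is split by which side initiated the shutdown, which is why the two nodes above now report different reasons for the same close. A hedged sketch of matching on the split variants:

```rust
use lightning::events::ClosureReason;

// Sketch only: cooperative closes now record direction.
fn closure_summary(reason: &ClosureReason) -> &'static str {
    match reason {
        ClosureReason::LocallyInitiatedCooperativeClosure => "we sent shutdown first",
        ClosureReason::CounterpartyInitiatedCooperativeClosure => "peer sent shutdown first",
        // Everything else: force-closes, processing errors, disconnects, etc.
        _ => "non-cooperative or unknown",
    }
}
```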
 
 #[test]
@@ -2671,70 +2634,8 @@ fn test_temporary_error_during_shutdown() {
        assert_eq!(txn_a, txn_b);
        assert_eq!(txn_a.len(), 1);
        check_spends!(txn_a[0], funding_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-}
-
-#[test]
-fn test_permanent_error_during_sending_shutdown() {
-       // Test that permanent failures when updating the monitor's shutdown script result in a force
-       // close when initiating a cooperative close.
-       let mut config = test_default_channel_config();
-       config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
-
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
-       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
-       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-
-       assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
-
-       // We always send the `shutdown` response when initiating a shutdown, even if we immediately
-       // close the channel thereafter.
-       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(msg_events.len(), 3);
-       if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
-       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
-       if let MessageSendEvent::HandleError { .. } =  msg_events[2] {} else { panic!(); }
-
-       check_added_monitors!(nodes[0], 2);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
-               [nodes[1].node.get_our_node_id()], 100000);
-}
-
-#[test]
-fn test_permanent_error_during_handling_shutdown() {
-       // Test that permanent failures when updating the monitor's shutdown script result in a force
-       // close when handling a cooperative close.
-       let mut config = test_default_channel_config();
-       config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
-
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
-       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
-       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-
-       assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
-       let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
-
-       // We always send the `shutdown` response when receiving a shutdown, even if we immediately
-       // close the channel thereafter.
-       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(msg_events.len(), 3);
-       if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
-       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
-       if let MessageSendEvent::HandleError { .. } =  msg_events[2] {} else { panic!(); }
-
-       check_added_monitors!(nodes[1], 2);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
-               [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2867,7 +2768,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
 
        let events = nodes[1].node.get_and_clear_pending_events();
@@ -2958,7 +2859,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
 
        let events = nodes[1].node.get_and_clear_pending_events();
@@ -3429,3 +3330,174 @@ fn test_durable_preimages_on_closed_channel() {
        do_test_durable_preimages_on_closed_channel(false, false, true);
        do_test_durable_preimages_on_closed_channel(false, false, false);
 }
+
+fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
+       // Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
+       // before restart, we run the monitor update completion action on startup.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+       let persister;
+       let new_chain_monitor;
+       let nodes_1_deserialized;
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+       // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+       // `update_fulfill_htlc`+`commitment_signed` we have a monitor update for both of B's channels.
+       // We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
+       // update pending, then reload B. At that point, the final monitor update on the B<->C channel
+       // is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors(&nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+       chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+       // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+       // for it since the monitor update is marked in-progress.
+       check_added_monitors(&nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       // Now step the Commitment Signed Dance between B and C and check that after the final RAA B
+       // doesn't let the preimage-removing monitor update fly.
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+       check_added_monitors(&nodes[1], 1);
+       let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+       nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+       check_added_monitors(&nodes[2], 1);
+       nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+       check_added_monitors(&nodes[2], 1);
+
+       let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_final_raa);
+       check_added_monitors(&nodes[1], 0);
+
+       // Finally, reload node B and check that, after we call `process_pending_events` once, we
+       // realize we've completed the A<->B preimage-including monitor update and so can release
+       // the B<->C preimage-removing monitor update.
+       let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+       let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+       let manager_b = nodes[1].node.encode();
+       reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+       if close_during_reload {
+               // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
+               // (as learned about during the on-reload block connection).
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               check_closed_broadcast!(nodes[0], true);
+               check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+               let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+               mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+       }
+
+       let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
+       let mut events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
+       expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
+               None, close_during_reload, false, false);
+       if close_during_reload {
+               match events[0] {
+                       Event::ChannelClosed { .. } => {},
+                       _ => panic!(),
+               }
+               check_closed_broadcast!(nodes[1], true);
+       }
+
+       // Once we run event processing, the monitor update should be freed; check that it was
+       // indeed the B<->C channel which was updated.
+       check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
+       let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
+       assert!(bc_update_id != post_ev_bc_update_id);
+
+       // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
+       // fine.
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+       send_payment(&nodes[1], &[&nodes[2]], 100_000);
+}
+
+#[test]
+fn test_reload_mon_update_completion_actions() {
+       do_test_reload_mon_update_completion_actions(true);
+       do_test_reload_mon_update_completion_actions(false);
+}
+
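The test above pins down an ordering invariant: a `ChannelMonitor` may legitimately be persisted ahead of the `ChannelManager`, and on restart the stale manager catches up by replaying monitor update completion actions during event processing. A rough sketch of the application-side loop this relies on; `persister` and `event_handler` are stand-ins for whatever the application actually wires up:

```rust
// Sketch only: always run event processing after startup, and persist the
// manager whenever LDK flags it as changed, so completion actions (like the
// PaymentForwarded event above) are never lost across restarts.
loop {
    channel_manager.process_pending_events(&event_handler);
    if channel_manager.get_and_clear_needs_persistence() {
        persister.persist_manager(&channel_manager)
            .expect("manager persistence must not fail silently");
    }
    // Real code would block on LDK's persistence/event future here and also
    // watch for a shutdown signal.
}
```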
+fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
+       // Test that if a peer manages to send an `update_fulfill_htlc` message without a
+       // `commitment_signed`, disconnects, then replays the `update_fulfill_htlc` message, it
+       // doesn't result in a channel hang. This was previously broken: the `DuplicateClaim` case
+       // wasn't handled when claiming an HTLC, and handling for it wasn't added when monitor
+       // update completion actions (which must always complete at some point) were introduced.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       create_announced_chan_between_nodes(&nodes, 1, 2);
+
+       // Route a payment from A, through B, to C, then claim it on C. Deliver the
+       // `update_fulfill_htlc` to B twice (once directly, once replayed on reconnect) to check
+       // that B doesn't hang.
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors(&nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+       let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+       if hold_chan_a {
+               // The first update will be on the A <-> B channel, which we hold (the persister
+               // returns InProgress for it).
+               chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       }
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+       check_added_monitors(&nodes[1], 1);
+
+       if !hold_chan_a {
+               let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+               expect_payment_sent!(&nodes[0], payment_preimage);
+       }
+
+       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+       let mut reconnect = ReconnectArgs::new(&nodes[1], &nodes[2]);
+       reconnect.pending_htlc_claims = (1, 0);
+       reconnect_nodes(reconnect);
+
+       if !hold_chan_a {
+               expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
+               send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+       } else {
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+               let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[1], nodes[2], 1_000_000);
+
+               nodes[1].node.send_payment_with_route(&route, payment_hash_2,
+                       RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+               check_added_monitors(&nodes[1], 0);
+
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       }
+}
+
+#[test]
+fn test_glacial_peer_cant_hang() {
+       do_test_glacial_peer_cant_hang(false);
+       do_test_glacial_peer_cant_hang(true);
+}
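Both added tests lean on the same property: a channel whose latest monitor update is `InProgress` queues work rather than failing, which is why B's `send_payment_with_route` above succeeds while producing no messages or monitor updates. In production, the pause ends when the application reports the persist as complete. A hedged sketch, with method names as exposed by `ChainMonitor` around this release (exact parameter types have shifted between versions):

```rust
// Sketch only: after an async monitor persist has durably completed, unblock
// the channel by reporting it to the ChainMonitor.
for (funding_txo, update_ids) in chain_monitor.list_pending_monitor_updates() {
    for update_id in update_ids {
        // Only report completion once the write is actually on disk;
        // claiming success early risks losing funds on crash.
        chain_monitor.channel_monitor_updated(funding_txo, update_id)
            .expect("channel should still be known to the ChainMonitor");
    }
}
```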