+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+}
+
+#[test]
+fn double_temp_error() {
+ // Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
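+ // Returning `InProgress` from the persister simulates an async monitor persist that has
+ // not yet completed; the channel will be blocked until we later signal completion via
+ // `force_channel_monitor_updated`.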
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ // `claim_funds` results in a ChannelMonitorUpdate.
+ nodes[1].node.claim_funds(payment_preimage_1);
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
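+ // The test ChainMonitor records each channel's funding outpoint and latest monitor update
+ // id; grab them so we can mark this exact update as completed later.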
+ let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
+ // which had some asserts that prevented it from being called twice.
+ nodes[1].node.claim_funds(payment_preimage_2);
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
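+ // Let subsequent persistence calls succeed synchronously again before we signal
+ // completion of the two pending updates.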
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+
+ let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
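+ // Complete the two pending updates in order. Completing only the first must not release
+ // any messages, since the channel remains blocked on the still-pending second update.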
+ nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 0);
+ nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
+
+ // Complete the first HTLC.
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let (update_fulfill_1, commitment_signed_b1, node_id) = {
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert!(updates.update_add_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone(), node_id.clone())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ };
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
+ check_added_monitors!(nodes[0], 0);
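+ // The PaymentSent event is generated as soon as we learn the preimage; the matching
+ // PaymentPathSuccessful event only fires after the HTLC is fully removed (checked after
+ // the revoke_and_ack below).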
+ expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.process_pending_htlc_forwards();
+ let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 0);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
+ check_added_monitors!(nodes[1], 1);
+
+ // Complete the second HTLC.
+ let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ (match &events[0] {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ },
+ match events[1] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ })
+ };
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
+ check_added_monitors!(nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
+ check_added_monitors!(nodes[0], 0);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
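+ // `commitment_signed_dance` drives the remaining commitment_signed/revoke_and_ack
+ // exchange for the second fulfill to completion.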
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
+ expect_payment_sent!(nodes[0], payment_preimage_2);
+}
+
+fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
+ // Test that if the monitor update generated when handling funding_signed is persisted
+ // asynchronously and we restart with the latest ChannelManager, but the ChannelMonitor
+ // persistence never completed, we happily drop the channel and move on.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+
+ let mut chan_config = test_default_channel_config();
+ chan_config.manually_accept_inbound_channels = true;
+ chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ if use_0conf {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ } else {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ }
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+
+ let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ check_added_monitors!(nodes[1], 1);
+
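+ // nodes[1] responds with funding_signed, plus an immediate channel_ready when using 0conf.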
+ let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
+ match &bs_signed_locked[0] {
+ MessageSendEvent::SendFundingSigned { msg, .. } => {
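+ // Leave nodes[0]'s initial ChannelMonitor persist (created while handling funding_signed)
+ // pending asynchronously.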
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
+ check_added_monitors!(nodes[0], 1);
+ }
+ _ => panic!("Unexpected event"),
+ }
+ if use_0conf {
+ match &bs_signed_locked[1] {
+ MessageSendEvent::SendChannelReady { msg, .. } => {
+ nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
+ }
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+ // nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
+ // broadcast the funding transaction. If nodes[0] restarts at this point with the
+ // ChannelMonitor lost, we should simply discard the channel.
+
+ // The test framework checks that watched_txn/outputs match the monitor set, which they will
+ // not, so we have to clear them here.
+ nodes[0].chain_source.watched_txn.lock().unwrap().clear();
+ nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
+
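+ // Reload with the serialized ChannelManager but an empty monitor list (the `&[]`),
+ // simulating a restart where the initial ChannelMonitor write never made it to disk.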
+ reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
+ check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+ assert!(nodes[0].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_outbound_reload_without_init_mon() {
+ do_test_outbound_reload_without_init_mon(true);
+ do_test_outbound_reload_without_init_mon(false);
+}
+
+fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
+ // Test that if the monitor update generated when handling funding_created is persisted
+ // asynchronously and we restart with the latest ChannelManager, but the ChannelMonitor
+ // persistence never completed, we happily drop the channel and move on.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+
+ let mut chan_config = test_default_channel_config();
+ chan_config.manually_accept_inbound_channels = true;
+ chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
+
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ if use_0conf {
+ nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ } else {
+ nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+ }
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+
+ let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
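+ // Leave nodes[1]'s initial ChannelMonitor persist (created while handling funding_created)
+ // pending asynchronously.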
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ // nodes[1] happily sends its funding_signed even though it's awaiting persistence of the
+ // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
+ // transaction is confirmed.
+ let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+ check_added_monitors!(nodes[0], 1);
+
+ let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
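+ // Whether via confirmation or 0conf trust, nodes[0] may send channel_ready, but nodes[1]
+ // must stay silent while its initial monitor persist is pending.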
+ if lock_commitment {
+ confirm_transaction(&nodes[0], &as_funding_tx[0]);
+ confirm_transaction(&nodes[1], &as_funding_tx[0]);
+ }
+ if use_0conf || lock_commitment {
+ let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
+ }
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
+ // move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
+ // restarts at this point with the ChannelMonitor lost, we should simply discard the channel.
+
+ // The test framework checks that watched_txn/outputs match the monitor set, which they will
+ // not, so we have to clear them here.
+ nodes[1].chain_source.watched_txn.lock().unwrap().clear();
+ nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
+
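+ // As above, reload with an empty ChannelMonitor list (`&[]`) to simulate the monitor
+ // having been lost across the restart.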
+ reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
+
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+ assert!(nodes[1].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_inbound_reload_without_init_mon() {
+ do_test_inbound_reload_without_init_mon(true, true);
+ do_test_inbound_reload_without_init_mon(true, false);
+ do_test_inbound_reload_without_init_mon(false, true);
+ do_test_inbound_reload_without_init_mon(false, false);
+}