+
+#[test]
+fn test_counterparty_revoked_reorg() {
+ // Test what happens when a revoked counterparty transaction is broadcast but then reorg'd out
+ // of the main chain. Specifically, HTLCs in the latest commitment transaction which are not
+ // included in the revoked commitment transaction should not be considered failed, and should
+ // still be claimable by the counterparty after the reorg.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+
+ // Get the initial commitment transaction for broadcast, before any HTLCs are added at all.
+ let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
+ assert_eq!(revoked_local_txn.len(), 1);
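+ // Routing the HTLCs below advances the commitment state, so from nodes[1]'s perspective this
+ // snapshot becomes a revoked counterparty commitment.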
+
+ // Now add two HTLCs in each direction, one dust and one not.
+ route_payment(&nodes[0], &[&nodes[1]], 5_000_000);
+ route_payment(&nodes[0], &[&nodes[1]], 5_000);
+ let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[1], &[&nodes[0]], 4_000_000);
+ let payment_hash_4 = route_payment(&nodes[1], &[&nodes[0]], 4_000).1;
+
+ nodes[0].node.claim_funds(payment_preimage_3);
+ let _ = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ check_added_monitors!(nodes[0], 1);
+ expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000);
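+ // Claiming with the preimage means nodes[0]'s monitor can now build an HTLC-success
+ // transaction for HTLC 3, which shows up alongside the commitment transaction below.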
+
+ let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
+ assert_eq!(unrevoked_local_txn.len(), 3); // commitment + 2 HTLC txn
+ // Sort the unrevoked transactions in descending total-output-value order, i.e. commitment tx
+ // first, then the HTLC 1 transaction, then the HTLC 3 transaction.
+ unrevoked_local_txn.sort_unstable_by_key(|tx| 1_000_000 - tx.output.iter().map(|outp| outp.value).sum::<u64>());
+
+ // Now mine A's old commitment transaction, which should close the channel, but take no action
+ // on any of the HTLCs, at least until we get six confirmations (which we won't get).
+ mine_transaction(&nodes[1], &revoked_local_txn[0]);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_closed_broadcast!(nodes[1], true);
+
+ // Connect up to one block before the revoked transaction would be considered final, then do a
+ // reorg that disconnects the full chain and goes up to the height at which the revoked
+ // transaction would be final.
+ let theoretical_conf_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ disconnect_all_blocks(&nodes[1]);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ connect_blocks(&nodes[1], theoretical_conf_height);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Now connect A's latest commitment transaction instead and resolve the HTLCs
+ mine_transaction(&nodes[1], &unrevoked_local_txn[0]);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Connect the HTLC claim transaction for HTLC 3
+ mine_transaction(&nodes[1], &unrevoked_local_txn[2]);
+ expect_payment_sent!(nodes[1], payment_preimage_3);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Connect blocks until the non-revoked commitment transaction has ANTI_REORG_DELAY
+ // confirmations; at that point the dust HTLC (payment 4), which has no on-chain output to
+ // claim, should be failed back to its sender.
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+ expect_payment_failed!(nodes[1], payment_hash_4, false);
+}
+
+fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_unconfirmed: bool, connect_style: ConnectStyle) {
+ // After creating a channel between the nodes, we disconnect all blocks previously seen to force
+ // a channel close on nodes[0]'s side. We also use this to provide very basic testing of the
+ // logic around freeing background events which store monitor updates during block_[dis]connected.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ *nodes[0].connect_style.borrow_mut() = connect_style;
+
+ let chan_conf_height = core::cmp::max(nodes[0].best_block_info().1 + 1, nodes[1].best_block_info().1 + 1);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ {
+ let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(peer_state.channel_by_id.len(), 1);
+ assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 2);
+ }
+
+ assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(10));
+ assert_eq!(nodes[1].node.list_channels()[0].confirmations, Some(10));
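+ // The funding transaction was mined to CHAN_CONFIRM_DEPTH during channel setup, so both sides
+ // report that many confirmations.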
+
+ if !reorg_after_reload {
+ if use_funding_unconfirmed {
+ let relevant_txids = nodes[0].node.get_relevant_txids();
+ assert_eq!(relevant_txids.len(), 1);
+ let block_hash_opt = relevant_txids[0].1;
+ let expected_hash = nodes[0].get_block_header(chan_conf_height).block_hash();
+ assert_eq!(block_hash_opt, Some(expected_hash));
+ let txid = relevant_txids[0].0;
+ assert_eq!(txid, chan.3.txid());
+ nodes[0].node.transaction_unconfirmed(&txid);
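+ // transaction_unconfirmed informs the ChannelManager that the funding transaction was reorged
+ // out, which immediately makes the channel unusable.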
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
+ } else if connect_style == ConnectStyle::FullBlockViaListen {
+ disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(1));
+ disconnect_blocks(&nodes[0], 1);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
+ } else {
+ disconnect_all_blocks(&nodes[0]);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
+ }
+
+ let relevant_txids = nodes[0].node.get_relevant_txids();
+ assert_eq!(relevant_txids.len(), 0);
+
+ handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
+ check_added_monitors!(nodes[1], 1);
+ {
+ let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(peer_state.channel_by_id.len(), 0);
+ assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
+ }
+ }
+
+ if reload_node {
+ // Since we currently have a background event pending, it's good to test that we survive a
+ // serialization roundtrip. Further, this tests the somewhat awkward edge-case of dropping
+ // the Channel object from the ChannelManager, but still having a monitor event pending for
+ // it when we go to deserialize, and then use the ChannelManager.
+ let nodes_0_serialized = nodes[0].node.encode();
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
+
+ reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
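+ // Nothing should have been broadcast on reload; the force-close monitor update is still queued
+ // as a background event until we process it below.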
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ }
+
+ if reorg_after_reload {
+ if use_funding_unconfirmed {
+ let relevant_txids = nodes[0].node.get_relevant_txids();
+ assert_eq!(relevant_txids.len(), 1);
+ let block_hash_opt = relevant_txids[0].1;
+ let expected_hash = nodes[0].get_block_header(chan_conf_height).block_hash();
+ assert_eq!(block_hash_opt, Some(expected_hash));
+ let txid = relevant_txids[0].0;
+ assert_eq!(txid, chan.3.txid());
+ nodes[0].node.transaction_unconfirmed(&txid);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ } else if connect_style == ConnectStyle::FullBlockViaListen {
+ disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+ assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(1));
+ disconnect_blocks(&nodes[0], 1);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
+ } else {
+ disconnect_all_blocks(&nodes[0]);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
+ }
+
+ let relevant_txids = nodes[0].node.get_relevant_txids();
+ assert_eq!(relevant_txids.len(), 0);
+
+ handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
+ check_added_monitors!(nodes[1], 1);
+ {
+ let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ assert_eq!(peer_state.channel_by_id.len(), 0);
+ assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
+ }
+ }
+ // With expect_channel_force_closed set, the TestChainMonitor will enforce that the next monitor
+ // update is a ChannelForceClosed on the right channel and that should_broadcast is set.
+ *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
+ nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
+ check_added_monitors!(nodes[0], 1);
+ let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
+ check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
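+ // Processing the force-close should have broadcast exactly one transaction: our latest local
+ // commitment.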
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+
+ // Now check that we can create a new channel
+ if reload_node && nodes[0].node.per_peer_state.read().unwrap().len() == 0 {
+ // If we dropped the channel before reloading the node, nodes[1] was also dropped from
+ // nodes[0]'s storage, and hence is not reconnected on startup. We therefore need to reconnect
+ // to the node before attempting to create a new channel.
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+ }
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ send_payment(&nodes[0], &[&nodes[1]], 8000000);
+}
+
+#[test]
+fn test_unconf_chan() {
+ do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+
+ do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+}
+
+#[test]
+fn test_unconf_chan_via_listen() {
+ do_test_unconf_chan(true, true, false, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(false, true, false, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(true, false, false, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(false, false, false, ConnectStyle::FullBlockViaListen);
+}
+
+#[test]
+fn test_unconf_chan_via_funding_unconfirmed() {
+ do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(false, true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
+
+ do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_unconf_chan(false, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+
+ do_test_unconf_chan(true, true, true, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(false, true, true, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(true, false, true, ConnectStyle::FullBlockViaListen);
+ do_test_unconf_chan(false, false, true, ConnectStyle::FullBlockViaListen);
+}
+
+#[test]
+fn test_set_outpoints_partial_claiming() {
+ // Test outpoint tracking across partial claims and reorgs:
+ // - the counterparty commitment confirms and we broadcast a claim spending both HTLC outputs
+ // - the counterparty confirms a partial claim (one HTLC-timeout), so we rebroadcast a claim for
+ //   only the remaining output
+ // - the partial claim is disconnected, so we go back to claiming both outputs
+ // - the commitment itself is disconnected, after which no further claim is generated
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
+ let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+ let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+
+ // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLCs
+ let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
+ assert_eq!(remote_txn.len(), 3);
+ assert_eq!(remote_txn[0].output.len(), 4);
+ assert_eq!(remote_txn[0].input.len(), 1);
+ assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
+ check_spends!(remote_txn[1], remote_txn[0]);
+ check_spends!(remote_txn[2], remote_txn[0]);
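+ // remote_txn[1] and remote_txn[2] are node B's HTLC-timeout transactions, each spending one of
+ // the HTLC outputs of its commitment transaction.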
+
+ // Provide node A with both preimages so it can claim both HTLCs once the counterparty
+ // commitment transaction confirms
+ nodes[0].node.claim_funds(payment_preimage_1);
+ expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000);
+ nodes[0].node.claim_funds(payment_preimage_2);
+ expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000);
+ check_added_monitors!(nodes[0], 2);
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Mine the counterparty commitment transaction on node A
+ mine_transaction(&nodes[0], &remote_txn[0]);
+ check_closed_broadcast!(nodes[0], true);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ check_added_monitors!(nodes[0], 1);
+ // Verify node A broadcasts a transaction claiming both HTLCs
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ // ChannelMonitor: claim tx
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0]);
+ assert_eq!(node_txn[0].input.len(), 2);
+ node_txn.clear();
+ }
+
+ // Connect blocks on node B past the HTLC CLTV expiry so it times out the HTLCs on-chain
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+ check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_added_monitors!(nodes[1], 1);
+ // Verify node B broadcasts its commitment transaction and 2 HTLC-timeout txn
+ let partial_claim_tx = {
+ let mut node_txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
+ assert_eq!(node_txn.len(), 3);
+ check_spends!(node_txn[0], chan.3);
+ check_spends!(node_txn[1], node_txn[0]);
+ check_spends!(node_txn[2], node_txn[0]);
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[2].input.len(), 1);
+ assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
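+ // Keep one of the two HTLC-timeout transactions to use as node B's partial claim below.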
+ node_txn.remove(1)
+ };
+
+ // Mine node B's partial claim on node A; node A should regenerate a claiming tx with the
+ // claimed HTLC dropped
+ mine_transaction(&nodes[0], &partial_claim_tx);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0]);
+ assert_eq!(node_txn[0].input.len(), 1); //dropped HTLC
+ node_txn.clear();
+ }
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Disconnect the last block on node A, un-confirming the partial claim; node A should
+ // regenerate a claiming tx spending both HTLCs again
+ disconnect_blocks(&nodes[0], 1);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0]);
+ assert_eq!(node_txn[0].input.len(), 2); //resurrected HTLC
+ node_txn.clear();
+ }
+
+ // Disconnect one more block (un-confirming the commitment transaction) and then reconnect
+ // several blocks; no new claiming transaction should be generated
+ disconnect_blocks(&nodes[0], 1);
+ connect_blocks(&nodes[0], 15);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 0);
+ node_txn.clear();
+ }
+}
+
+fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
+ // In previous code, detection of to_remote outputs in a counterparty commitment transaction
+ // was dependent on whether a local commitment transaction had been seen on-chain previously.
+ // This resulted in some edge cases around not being able to generate a SpendableOutputs event
+ // after a reorg.
+ //
+ // Here, we test this by first confirming one set of commitment transactions, then
+ // disconnecting them and reconnecting another. We then confirm them and check that the correct
+ // SpendableOutput event is generated.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ *nodes[0].connect_style.borrow_mut() = style;
+ *nodes[1].connect_style.borrow_mut() = style;
+
+ let (_, _, chan_id, funding_tx) =
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
+ let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+ assert_eq!(funding_outpoint.to_channel_id(), chan_id);
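+ // The channel ID is derived from the funding outpoint, so it should match the channel we just
+ // opened.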
+
+ let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
+ let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
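+ // Each of these is the node's own latest commitment transaction; from the other node's point of
+ // view it is a counterparty ("remote") commitment, hence the naming.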
+
+ mine_transaction(&nodes[0], &remote_txn_a[0]);
+ mine_transaction(&nodes[1], &remote_txn_a[0]);
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+ disconnect_blocks(&nodes[0], 1);
+ disconnect_blocks(&nodes[1], 1);
+
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+ mine_transaction(&nodes[0], &remote_txn_b[0]);
+ mine_transaction(&nodes[1], &remote_txn_b[0]);
+
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+ let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
+ assert_eq!(node_a_spendable.len(), 1);
+ if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
+ assert_eq!(outputs.len(), 1);
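+ // Sweep the to_remote output to a dummy OP_RETURN script to demonstrate that the output is
+ // actually spendable with the descriptor we were given.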
+ let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+ Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+ check_spends!(spend_tx, remote_txn_b[0]);
+ }
+
+ // nodes[1] is waiting for its to_self_delay to expire, which is many more blocks than
+ // ANTI_REORG_DELAY. Instead, walk the chain back, confirm the original remote_txn_a commitment
+ // again, and check that nodes[1] generates a similar spendable output.
+ // Technically a reorg of ANTI_REORG_DELAY blocks violates our assumptions, so this is undefined
+ // by our API spec, but we currently handle it correctly and there's little reason we shouldn't
+ // in the future.
+ assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ disconnect_blocks(&nodes[1], ANTI_REORG_DELAY);
+ mine_transaction(&nodes[1], &remote_txn_a[0]);
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+ let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
+ assert_eq!(node_b_spendable.len(), 1);
+ if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
+ assert_eq!(outputs.len(), 1);
+ let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+ Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+ check_spends!(spend_tx, remote_txn_a[0]);
+ }
+}
+
+#[test]
+fn test_to_remote_after_local_detection() {
+ do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirst);
+ do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstSkippingBlocks);
+ do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstReorgsOnlyTip);
+ do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirst);
+ do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstSkippingBlocks);
+ do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstReorgsOnlyTip);
+ do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
+}