+
+#[test]
+fn test_set_outpoints_partial_claiming() {
+ // Test that when a counterparty claims a subset of the outputs our aggregated claim tx was
+ // spending, we regenerate a claim tx without the now-spent outpoint, and that disconnecting
+ // blocks walks that state back correctly:
+ // - remote party claim tx, new bump tx
+ // - disconnect remote claiming tx, new bump
+ // - disconnect tx, see no tx anymore
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::supported(), InitFeatures::supported());
+ // Two B -> A payments so B's commitment tx carries two HTLC outputs claimable by A
+ let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+ let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+
+ // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
+ // remote_txn[0] is node B's commitment tx; [1] and [2] each spend one of its HTLC outputs
+ let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+ assert_eq!(remote_txn.len(), 3);
+ assert_eq!(remote_txn[0].output.len(), 4);
+ assert_eq!(remote_txn[0].input.len(), 1);
+ assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
+ check_spends!(remote_txn[1], remote_txn[0].clone());
+ check_spends!(remote_txn[2], remote_txn[0].clone());
+
+ // Connect blocks to advance height towards TEST_FINAL_CLTV
+ // NOTE(review): the original comment said "node A" but this connects on nodes[1]'s
+ // (node B's) block notifier — confirm which node was intended.
+ let prev_header_100 = connect_blocks(&nodes[1].block_notifier, 100, 0, false, Default::default());
+ // Provide node A with both preimages so its monitor can claim the HTLCs on-chain
+ nodes[0].node.claim_funds(payment_preimage_1, 3_000_000);
+ nodes[0].node.claim_funds(payment_preimage_2, 3_000_000);
+ check_added_monitors!(nodes[0], 2);
+ // Drain events/messages this test does not inspect
+ nodes[0].node.get_and_clear_pending_events();
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Connect the remote (node B) commitment transaction on node A
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
+ // Verify node A broadcast tx claiming both HTLCs
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ // ChannelMonitor: claim tx, ChannelManager: local commitment tx + HTLC-Success*2
+ assert_eq!(node_txn.len(), 4);
+ check_spends!(node_txn[0], remote_txn[0].clone());
+ check_spends!(node_txn[1], chan.3.clone());
+ check_spends!(node_txn[2], node_txn[1]);
+ check_spends!(node_txn[3], node_txn[1]);
+ // The monitor's claim tx aggregates both HTLC outputs into a single transaction
+ assert_eq!(node_txn[0].input.len(), 2);
+ node_txn.clear();
+ }
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Connect blocks on node B
+ connect_blocks(&nodes[1].block_notifier, 135, 0, false, Default::default());
+ // Verify node B broadcast 2 HTLC-timeout txn
+ let partial_claim_tx = {
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ check_spends!(node_txn[1], node_txn[0].clone());
+ check_spends!(node_txn[2], node_txn[0].clone());
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[2].input.len(), 1);
+ // Keep one single-input HTLC tx: it spends only one of the two outputs A is claiming
+ node_txn[1].clone()
+ };
+ nodes[1].node.get_and_clear_pending_msg_events();
+
+ // Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![partial_claim_tx.clone()] }, 102);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0].clone());
+ assert_eq!(node_txn[0].input.len(), 1); //dropped HTLC
+ node_txn.clear();
+ }
+ nodes[0].node.get_and_clear_pending_msg_events();
+
+ // Disconnect last block on node A: the partial claim is undone, so the regenerated claim
+ // tx should reinstate the previously dropped HTLC (see the resurrected-HTLC assert below)
+ nodes[0].block_notifier.block_disconnected(&header, 102);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], remote_txn[0].clone());
+ assert_eq!(node_txn[0].input.len(), 2); //resurrected HTLC
+ node_txn.clear();
+ }
+
+ // Disconnect one more block and then reconnect multiple blocks: no transaction should be
+ // generated.
+ // NOTE(review): the reconnect below uses nodes[1]'s notifier while the disconnect is on
+ // nodes[0] — confirm this mix of nodes is intentional.
+ nodes[0].block_notifier.block_disconnected(&header, 101);
+ connect_blocks(&nodes[1].block_notifier, 15, 101, false, prev_header_100);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 0);
+ node_txn.clear();
+ }
+}
+
+#[test]
+fn test_counterparty_raa_skip_no_crash() {
+ // Previously, if our counterparty sent two RAAs in a row without us having provided a
+ // commitment transaction, we would have happily carried on and provided them the next
+ // commitment transaction based on one RAA forward. This would probably eventually have led to
+ // channel closure, but it would not have resulted in funds loss. Still, our
+ // EnforcingChannelKeys would have panicked as it doesn't like jumps into the future. Here, we
+ // check simply that the channel is closed in response to such an RAA, but don't check whether
+ // we decide to punish our counterparty for revoking their funds (as we don't currently
+ // implement that).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
+
+ // Grab node A's commitment seed so we can forge a skipped-ahead RAA on its behalf
+ let commitment_seed = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&channel_id).unwrap().local_keys.commitment_seed().clone();
+ // Commitment numbers count down from 2^48 - 1 (BOLT 3)
+ const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+ // Next per-commitment point derived from commitment number INITIAL - 2, i.e. one state
+ // further ahead than a single in-order RAA would justify
+ let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+ &SecretKey::from_slice(&chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+ let per_commitment_secret = chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER);
+
+ // Node B should force-close rather than accept the out-of-order revocation
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
+ &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
+ assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
+}
+
+#[test]
+fn test_bump_txn_sanitize_tracking_maps() {
+ // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy,
+ // verify we clean them right after expiration of ANTI_REORG_DELAY.
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::supported(), InitFeatures::supported());
+ // Lock HTLC in both directions
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
+ route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+
+ // Snapshot node B's current commitment tx before it gets revoked below
+ let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
+
+ // Revoke local commitment tx
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 9_000_000);
+
+ // Broadcast set of revoked txn on A
+ let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, false, Default::default());
+ let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129);
+ check_closed_broadcast!(nodes[0], false);
+ let penalty_txn = {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ check_spends!(node_txn[1], revoked_local_txn[0].clone());
+ check_spends!(node_txn[2], revoked_local_txn[0].clone());
+ let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
+ node_txn.clear();
+ penalty_txn
+ };
+ // Confirm the justice txn, then connect 5 more blocks to let ANTI_REORG_DELAY elapse
+ let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].block_notifier.block_connected(&Block { header: header_130, txdata: penalty_txn }, 130);
+ connect_blocks(&nodes[0].block_notifier, 5, 130, false, header_130.bitcoin_hash());
+ {
+ // After the delay both tracking maps must have been emptied
+ let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+ if let Some(monitor) = monitors.get(&OutPoint::new(chan.3.txid(), 0)) {
+ assert!(monitor.pending_claim_requests.is_empty());
+ assert!(monitor.claimable_outpoints.is_empty());
+ }
+ }
+}