Swap key_derivation_params (u64, u64) for channel_keys_id [u8; 32]
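
SpendableOutputDescriptor variants now carry an opaque 32-byte channel_keys_id
instead of the old key_derivation_params (u64, u64) pair, and derive_channel_keys
takes the channel value plus that id in place of the two u64 parameters. Below is
a rough sketch of the updated call pattern, mirroring the check_spendable_outputs!
sites in this diff; names such as descriptor, keys_manager and channel_value are
illustrative, while the descriptor fields and the derive_channel_keys call shape
are taken from the diff itself:

    match descriptor {
        SpendableOutputDescriptor::StaticOutputCounterpartyPayment { ref outpoint, ref output, ref channel_keys_id } => {
            // was: derive_channel_keys(channel_value, key_derivation_params.0, key_derivation_params.1)
            let keys = keys_manager.derive_channel_keys(channel_value, channel_keys_id);
            // ... rebuild the witness script from keys.pubkeys().payment_point and
            // sweep `output` at `outpoint`, as check_spendable_outputs! does below ...
        },
        _ => {},
    }
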
[rust-lightning] / lightning / src / ln / functional_tests.rs
index 6bac24911ad2fbb6005e180fe0bd22ebe3478573..4c15a95eb0e4baeb34ab1e63115727344f8f7234 100644
@@ -2470,7 +2470,9 @@ fn test_justice_tx() {
        bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
        bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
        let user_cfgs = [Some(alice_config), Some(bob_config)];
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
+       chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -2600,7 +2602,8 @@ fn revoked_output_claim() {
 #[test]
 fn claim_htlc_outputs_shared_tx() {
        // Node revoked old state, htlcs haven't timed out yet, claim them in a shared justice tx
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -2670,7 +2673,8 @@ fn claim_htlc_outputs_shared_tx() {
 #[test]
 fn claim_htlc_outputs_single_tx() {
        // Node revoked old state, htlcs have timed out, claim each of them in a separate justice tx
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -3396,7 +3400,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
+       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
@@ -3457,7 +3461,7 @@ fn test_force_close_fail_back() {
        // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 
-       nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
+       nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
        check_closed_broadcast!(nodes[2], false);
        check_added_monitors!(nodes[2], 1);
        let tx = {
@@ -4658,7 +4662,7 @@ macro_rules! check_spendable_outputs {
                                        Event::SpendableOutputs { ref outputs } => {
                                                for outp in outputs {
                                                        match *outp {
-                                                               SpendableOutputDescriptor::StaticOutputCounterpartyPayment { ref outpoint, ref output, ref key_derivation_params } => {
+                                                               SpendableOutputDescriptor::StaticOutputCounterpartyPayment { ref outpoint, ref output, ref channel_keys_id } => {
                                                                        let input = TxIn {
                                                                                previous_output: outpoint.into_bitcoin_outpoint(),
                                                                                script_sig: Script::new(),
@@ -4677,7 +4681,7 @@ macro_rules! check_spendable_outputs {
                                                                        };
                                                                        spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 35 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4
                                                                        let secp_ctx = Secp256k1::new();
-                                                                       let keys = $keysinterface.derive_channel_keys($chan_value, key_derivation_params.0, key_derivation_params.1);
+                                                                       let keys = $keysinterface.derive_channel_keys($chan_value, channel_keys_id);
                                                                        let remotepubkey = keys.pubkeys().payment_point;
                                                                        let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
                                                                        let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap();
@@ -4687,7 +4691,7 @@ macro_rules! check_spendable_outputs {
                                                                        spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
                                                                        txn.push(spend_tx);
                                                                },
-                                                               SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref revocation_pubkey } => {
+                                                               SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref channel_keys_id, ref revocation_pubkey } => {
                                                                        let input = TxIn {
                                                                                previous_output: outpoint.into_bitcoin_outpoint(),
                                                                                script_sig: Script::new(),
@@ -4705,7 +4709,7 @@ macro_rules! check_spendable_outputs {
                                                                                output: vec![outp],
                                                                        };
                                                                        let secp_ctx = Secp256k1::new();
-                                                                       let keys = $keysinterface.derive_channel_keys($chan_value, key_derivation_params.0, key_derivation_params.1);
+                                                                       let keys = $keysinterface.derive_channel_keys($chan_value, channel_keys_id);
                                                                        if let Ok(delayed_payment_key) = chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &keys.inner.delayed_payment_base_key) {
 
                                                                                let delayed_payment_pubkey = PublicKey::from_secret_key(&secp_ctx, &delayed_payment_key);
@@ -4779,7 +4783,7 @@ fn test_claim_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
-       nodes[1].node.force_close_channel(&chan.2);
+       nodes[1].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -4806,7 +4810,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
-       nodes[0].node.force_close_channel(&chan.2);
+       nodes[0].node.force_close_channel(&chan.2).unwrap();
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
@@ -4993,7 +4997,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
 
 #[test]
 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -5059,7 +5064,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 
 #[test]
 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -7040,7 +7046,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        // We can have at most two valid local commitment txs, so both cases must be covered and both txs must be checked to get them all, as an
        // HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA
 
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -7379,7 +7386,10 @@ fn test_data_loss_protect() {
        let fee_estimator;
        let tx_broadcaster;
        let chain_source;
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
+       // during signing due to a revoked tx
+       chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
        let keys_manager = &chanmon_cfgs[0].keys_manager;
        let monitor;
        let node_state_0;
@@ -7699,7 +7709,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        // In case of penalty txn with feerates too low to get into mempools, RBF-bump them to be sure
        // we're able to claim outputs on revoked HTLC transactions before their timelocks expire
 
-       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -8542,7 +8553,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
        let mut force_closing_node = 0; // Alice force-closes
        if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
-       nodes[force_closing_node].node.force_close_channel(&chan_ab.2);
+       nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], false);
        check_added_monitors!(nodes[force_closing_node], 1);
        if go_onchain_before_fulfill {
@@ -8825,3 +8836,66 @@ fn test_duplicate_chan_id() {
        update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
        send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
 }
+
+#[test]
+fn test_error_chans_closed() {
+       // Test that we properly handle error messages, closing appropriate channels.
+       //
+       // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
+       // peer. The "real" fix for that is to index channels by peer id; however, in the meantime
+       // we can test various edge cases around it to ensure we don't regress.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Create some initial channels
+       let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+       let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+       let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+       assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+       assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
+
+       // An error from one peer naming a channel we have with a different peer has no effect
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+
+       // Closing one channel doesn't impact others
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
+       check_added_monitors!(nodes[0], 1);
+       check_closed_broadcast!(nodes[0], false);
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
+       assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
+       assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
+
+       // A null channel ID should close all channels with the sending peer
+       let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+       check_added_monitors!(nodes[0], 2);
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+                       assert_eq!(msg.contents.flags & 2, 2);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+                       assert_eq!(msg.contents.flags & 2, 2);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       // Note that at this point users of a standard PeerHandler will end up calling
+       // peer_disconnected with no_connection_possible set to false, duplicating the
+       // close-all-channels logic. That's OK; we don't want to end up not force-closing channels for
+       // users with their own peer-handling logic. We duplicate the call here, however.
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+       assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+       assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+}