Add test_bump_penalty_txn_on_revoked_commitment
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 83ca1e422ec7104f1433fc28becdb30510d7c698..65ae208bac37ea6d44ee6a3f34311867cf58d81b 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -1435,7 +1435,7 @@ fn do_channel_reserve_test(test_recv: bool) {
                let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
                let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
                let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
-               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
                let msg = msgs::UpdateAddHTLC {
                        channel_id: chan_1.2,
                        htlc_id,
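
Here and in the later hunks, construct_onion_packet takes a new [u8; 32] argument ahead of the payment hash; every call site in this file passes [0; 32]. A minimal sketch of the updated call shape, wrapped in a hypothetical helper — treating the new parameter as a PRNG seed for the packet's filler bytes is our reading, not something this diff states:

```rust
// Hypothetical test helper mirroring the new construct_onion_packet signature.
// `[0; 32]` stands in for the added [u8; 32] parameter (assumed to seed the
// onion filler); an all-zero seed keeps test packets deterministic.
fn build_test_onion_packet(route: &Route, session_priv: &SecretKey, payment_hash: &PaymentHash,
	cur_height: u32) -> msgs::OnionPacket {
	let secp_ctx = Secp256k1::new();
	let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, route, session_priv).unwrap();
	let (onion_payloads, _htlc_msat, _htlc_cltv) =
		onion_utils::build_onion_payloads(route, cur_height).unwrap();
	onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], payment_hash)
}
```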
@@ -1783,8 +1783,10 @@ fn channel_monitor_network_test() {
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
        nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+       let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
+               node2_commitment_txid = node_txn[0].txid();
 
                // Claim the payment on nodes[3], giving it knowledge of the preimage
                claim_funds!(nodes[3], nodes[2], payment_preimage_1, 3_000_000);
@@ -1818,6 +1820,16 @@ fn channel_monitor_network_test() {
                        nodes[3].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
 
+               // Clear the bumped claiming txn spending node 2's commitment tx. Bumped txn are only generated once a height timer has expired.
+               {
+                       let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
+                       node_txn.retain(|tx| tx.input[0].previous_output.txid != node2_commitment_txid);
+               }
+
                let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
 
                // Claim the payment on nodes[4], giving it knowledge of the preimage
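
The hunk above captures node 2's commitment txid up front so fee-bumped claims spending it can be pruned from the shared TestBroadcaster buffer before the next assertion. The same idiom as a standalone helper, assuming the buffer shape used throughout this file:

```rust
// Sketch: drop every broadcast tx whose first input spends `commitment_txid`,
// leaving only unrelated broadcasts for the assertions that follow.
fn prune_claims_of(broadcaster: &test_utils::TestBroadcaster, commitment_txid: Sha256dHash) {
	let mut txn = broadcaster.txn_broadcasted.lock().unwrap();
	txn.retain(|tx| tx.input[0].previous_output.txid != commitment_txid);
}
```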
@@ -1846,11 +1858,11 @@ fn channel_monitor_network_test() {
 #[test]
 fn test_justice_tx() {
        // Test justice txn built on revoked HTLC-Success tx, against both sides
-       let mut alice_config = UserConfig::new();
+       let mut alice_config = UserConfig::default();
        alice_config.channel_options.announced_channel = true;
        alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
        alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
-       let mut bob_config = UserConfig::new();
+       let mut bob_config = UserConfig::default();
        bob_config.channel_options.announced_channel = true;
        bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
        bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
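
UserConfig::new() becomes UserConfig::default() here and at every other call site below, suggesting the hand-rolled constructor was replaced by the standard Default trait. A sketch of that migration — the field names are taken from the accesses visible in this diff, and the derive assumes each field type implements Default:

```rust
// Illustrative only: with Default on every field type, the derive replaces a
// hand-written new() and lets callers write UserConfig::default().
#[derive(Default)]
pub struct UserConfig {
	pub own_channel_config: ChannelHandshakeConfig,
	pub peer_channel_config_limits: ChannelHandshakeLimits,
	pub channel_options: ChannelConfig,
}
```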
@@ -1884,6 +1896,7 @@ fn test_justice_tx() {
 
                        check_spends!(node_txn[0], revoked_local_txn[0].clone());
                        node_txn.swap_remove(0);
+                       node_txn.truncate(1);
                }
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
@@ -1901,6 +1914,10 @@ fn test_justice_tx() {
        // We test justice_tx build by A on B's revoked HTLC-Success tx
        // Create some new channels:
        let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+       {
+               let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               node_txn.clear();
+       }
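
The block just added drains nodes[1]'s broadcast buffer so justice txn bumped in the first half of the test don't bleed into the assertions about chan_6. As a standalone idiom:

```rust
// Sketch: clear leftover broadcasts so later assertions only observe
// transactions generated after this point in the test.
fn reset_broadcasts(broadcaster: &test_utils::TestBroadcaster) {
	broadcaster.txn_broadcasted.lock().unwrap().clear();
}
```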
 
        // A pending HTLC which will be revoked:
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
@@ -2078,7 +2095,7 @@ fn claim_htlc_outputs_single_tx() {
                }
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 22); // ChannelManager : 2, ChannelMontitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) + 5 * (1 local commitment tx + 1 htlc timeout tx)
+               assert_eq!(node_txn.len(), 29); // ChannelManager: 2, ChannelMonitor: 8 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) + 5 * (1 local commitment tx + 1 htlc timeout tx)
 
                assert_eq!(node_txn[0], node_txn[7]);
                assert_eq!(node_txn[1], node_txn[8]);
@@ -2088,10 +2105,6 @@ fn claim_htlc_outputs_single_tx() {
                assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcasted by ChannelManger
                assert_eq!(node_txn[4], node_txn[6]);
 
-               for i in 12..22 {
-                       if i % 2 == 0 { assert_eq!(node_txn[3], node_txn[i]); } else { assert_eq!(node_txn[4], node_txn[i]); }
-               }
-
                assert_eq!(node_txn[0].input.len(), 1);
                assert_eq!(node_txn[1].input.len(), 1);
                assert_eq!(node_txn[2].input.len(), 1);
@@ -3388,7 +3401,7 @@ fn test_no_txn_manager_serialize_deserialize() {
        assert!(chan_0_monitor_read.is_empty());
 
        let mut nodes_0_read = &nodes_0_serialized[..];
-       let config = UserConfig::new();
+       let config = UserConfig::default();
        let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
        let (_, nodes_0_deserialized) = {
                let mut channel_monitors = HashMap::new();
@@ -3458,7 +3471,7 @@ fn test_simple_manager_serialize_deserialize() {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
                <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
-                       default_config: UserConfig::new(),
+                       default_config: UserConfig::default(),
                        keys_manager,
                        fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
                        monitor: nodes[0].chan_monitor.clone(),
@@ -3518,7 +3531,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut nodes_0_read = &nodes_0_serialized[..];
        let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
        let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
-               default_config: UserConfig::new(),
+               default_config: UserConfig::default(),
                keys_manager,
                fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
                monitor: nodes[0].chan_monitor.clone(),
@@ -4815,7 +4828,7 @@ fn test_onion_failure() {
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
                let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
                onion_payloads[0].realm = 3;
-               msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+               msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
        }, ||{}, true, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here
 
        // final node failure
@@ -4825,7 +4838,7 @@ fn test_onion_failure() {
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
                let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
                onion_payloads[1].realm = 3;
-               msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+               msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
        }, ||{}, false, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
 
        // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node
@@ -5003,7 +5016,7 @@ fn test_onion_failure() {
                route.hops[1].cltv_expiry_delta += CLTV_FAR_FAR_AWAY + route.hops[0].cltv_expiry_delta + 1;
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
                let (onion_payloads, _, htlc_cltv) = onion_utils::build_onion_payloads(&route, height).unwrap();
-               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
                msg.cltv_expiry = htlc_cltv;
                msg.onion_routing_packet = onion_packet;
        }, ||{}, true, Some(21), None);
@@ -5253,7 +5266,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
        let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route, &session_priv).unwrap();
        let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
-       let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+       let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
 
        let mut msg = msgs::UpdateAddHTLC {
                channel_id: chan.2,
@@ -5948,7 +5961,7 @@ fn test_upfront_shutdown_script() {
        // BOLT 2 : Option upfront shutdown script, if peer commit its closing_script at channel opening
        // enforce it at shutdown message
 
-       let mut config = UserConfig::new();
+       let mut config = UserConfig::default();
        config.channel_options.announced_channel = true;
        config.peer_channel_config_limits.force_announced_channel_preference = false;
        config.channel_options.commit_upfront_shutdown_pubkey = false;
@@ -6046,9 +6059,9 @@ fn test_upfront_shutdown_script() {
 fn test_user_configurable_csv_delay() {
        // We test our channel constructors yield errors when we pass them absurd csv delay
 
-       let mut low_our_to_self_config = UserConfig::new();
+       let mut low_our_to_self_config = UserConfig::default();
        low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
-       let mut high_their_to_self_config = UserConfig::new();
+       let mut high_their_to_self_config = UserConfig::default();
        high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
        let cfgs = [Some(high_their_to_self_config.clone()), None];
        let nodes = create_network(2, &cfgs);
@@ -6135,11 +6148,11 @@ fn test_data_loss_protect() {
                monitor: monitor.clone(),
                logger: Arc::clone(&logger),
                tx_broadcaster,
-               default_config: UserConfig::new(),
+               default_config: UserConfig::default(),
                channel_monitors: &channel_monitors
        }).unwrap().1;
        nodes[0].node = Arc::new(node_state_0);
-       monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok();
+       assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
        nodes[0].chan_monitor = monitor;
        nodes[0].chain_monitor = chain_monitor;
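
The assert! added above fixes a silently discarded Result: a bare `.is_ok()` call evaluates to a bool that was simply dropped, so a failed monitor registration could never fail the test. The contrast, with `outpoint` standing in for the OutPoint literal:

```rust
// Before: the bool from is_ok() is computed and thrown away, so a failed
// registration goes unnoticed and the test keeps running against bad state.
monitor.add_update_monitor(outpoint, chan_monitor.clone()).is_ok();
// After: a failed registration aborts the test immediately.
assert!(monitor.add_update_monitor(outpoint, chan_monitor.clone()).is_ok());
```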
 
@@ -6256,3 +6269,163 @@ fn test_check_htlc_underpaying() {
        }
        nodes[1].node.get_and_clear_pending_events();
 }
+
+#[test]
+fn test_announce_disable_channels() {
+       // Create 3 channels between A and B. Disconnect B. Call timer_chan_freshness_every_min twice and check for the
+       // generated ChannelUpdates. Reconnect B, reestablish the channels, and check that no further ChannelUpdate is generated.
+
+       let nodes = create_network(2, &[None, None]);
+
+       let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new()).0.contents.short_channel_id;
+       let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, LocalFeatures::new(), LocalFeatures::new()).0.contents.short_channel_id;
+       let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new()).0.contents.short_channel_id;
+
+       // Disconnect peers
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+       nodes[0].node.timer_chan_freshness_every_min(); // dirty -> staged
+       nodes[0].node.timer_chan_freshness_every_min(); // staged -> fresh
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 3);
+       for e in msg_events {
+               match e {
+                       MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+                               let short_id = msg.contents.short_channel_id;
+                               // Check that the generated channel_update matches one of the three disabled channels
+                               if short_id != short_id_1 && short_id != short_id_2 && short_id != short_id_3 {
+                                       panic!("Generated ChannelUpdate for wrong chan!");
+                               }
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+       // Reconnect peers
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+       let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+       assert_eq!(reestablish_1.len(), 3);
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+       let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+       assert_eq!(reestablish_2.len(), 3);
+
+       // Reestablish chan_1
+       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+       handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+       handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+       // Reestablish chan_2
+       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]).unwrap();
+       handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]).unwrap();
+       handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+       // Reestablish chan_3
+       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]).unwrap();
+       handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]).unwrap();
+       handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+       nodes[0].node.timer_chan_freshness_every_min();
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 0);
+}
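
The two back-to-back timer_chan_freshness_every_min() calls reflect a debounce: a channel whose peer has disconnected is only staged for disabling on the first tick, and the disabling ChannelUpdate is broadcast on the second, while a reconnection in between resets the state. A minimal sketch of that progression — the enum and names are illustrative, not rust-lightning's:

```rust
// Hypothetical debounce mirroring the dirty -> staged -> fresh progression in
// the comments above; illustrative only.
enum UpdateStatus { Fresh, Dirty, Staged }

struct Chan { status: UpdateStatus }

impl Chan {
	fn on_peer_disconnected(&mut self) { self.status = UpdateStatus::Dirty; }

	/// One timer tick; returns true when a disabling ChannelUpdate
	/// should be broadcast for this channel.
	fn tick(&mut self, peer_reconnected: bool) -> bool {
		match self.status {
			// First tick after disconnect: dirty -> staged, nothing broadcast yet.
			UpdateStatus::Dirty => { self.status = UpdateStatus::Staged; false }
			// Second tick: staged -> fresh; broadcast the disable unless the
			// peer came back in the meantime.
			UpdateStatus::Staged => { self.status = UpdateStatus::Fresh; !peer_reconnected }
			// Fresh channels generate nothing until the peer disconnects again.
			UpdateStatus::Fresh => false,
		}
	}
}
```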
+
+#[test]
+fn test_bump_penalty_txn_on_revoked_commitment() {
+       // If a penalty tx's feerate is too low to get into mempools, RBF-bump it to make sure
+       // we're able to claim the outputs on the revoked commitment transaction before the timelocks expire
+
+       let nodes = create_network(2, &[None, None]);
+
+       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, LocalFeatures::new(), LocalFeatures::new());
+       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+       let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 3000000, 30).unwrap();
+       send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
+
+       let revoked_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
+       assert_eq!(revoked_txn[0].output.len(), 4);
+       assert_eq!(revoked_txn[0].input.len(), 1);
+       assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
+       let revoked_txid = revoked_txn[0].txid();
+
+       let mut penalty_sum = 0;
+       for outp in revoked_txn[0].output.iter() {
+               if outp.script_pubkey.is_v0_p2wsh() {
+                       penalty_sum += outp.value;
+               }
+       }
+
+       // Connect blocks to change the height_timer range, to check that we use the right soonest_timelock
+       let header_114 = connect_blocks(&nodes[1].block_notifier, 114, 0, false, Default::default());
+
+       // Actually revoke the commitment tx by claiming an HTLC
+       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_txn[0].clone()] }, 115);
+
+       // A justice tx should have been broadcast; check it
+       let penalty_1;
+       let feerate_1;
+       {
+               let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn.len(), 4); // justice tx (broadcasted from ChannelMonitor) * 2 (block-rescan) + local commitment tx + local HTLC-timeout (broadcasted from ChannelManager)
+               assert_eq!(node_txn[0], node_txn[3]);
+               assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
+               assert_eq!(node_txn[0].output.len(), 1);
+               check_spends!(node_txn[0], revoked_txn[0].clone());
+               let fee_1 = penalty_sum - node_txn[0].output[0].value;
+               feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
+               penalty_1 = node_txn[0].txid();
+               node_txn.clear();
+       };
+
+       // Once the height timer expires, a new fee-bumped justice tx should have been broadcast; check it
+       let header = connect_blocks(&nodes[1].block_notifier, 3, 115, true, header.bitcoin_hash());
+       let mut penalty_2 = penalty_1;
+       let mut feerate_2 = 0;
+       {
+               let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn.len(), 1);
+               if node_txn[0].input[0].previous_output.txid == revoked_txid {
+                       assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
+                       assert_eq!(node_txn[0].output.len(), 1);
+                       check_spends!(node_txn[0], revoked_txn[0].clone());
+                       penalty_2 = node_txn[0].txid();
+                       // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
+                       assert_ne!(penalty_2, penalty_1);
+                       let fee_2 = penalty_sum - node_txn[0].output[0].value;
+                       feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+                       // Verify 25% bump heuristic
+                       assert!(feerate_2 * 100 >= feerate_1 * 125);
+                       node_txn.clear();
+               }
+       }
+       assert_ne!(feerate_2, 0);
+
+       // Once the height timer expires a 2nd time, another fee-bumped justice tx should have been broadcast; check it
+       connect_blocks(&nodes[1].block_notifier, 3, 118, true, header);
+       let penalty_3;
+       let mut feerate_3 = 0;
+       {
+               let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn.len(), 1);
+               if node_txn[0].input[0].previous_output.txid == revoked_txid {
+                       assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
+                       assert_eq!(node_txn[0].output.len(), 1);
+                       check_spends!(node_txn[0], revoked_txn[0].clone());
+                       penalty_3 = node_txn[0].txid();
+                       // Verify the new bumped tx is different from the last claiming transaction; we don't want spurious rebroadcasts
+                       assert_ne!(penalty_3, penalty_2);
+                       let fee_3 = penalty_sum - node_txn[0].output[0].value;
+                       feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
+                       // Verify 25% bump heuristic
+                       assert!(feerate_3 * 100 >= feerate_2 * 125);
+                       node_txn.clear();
+               }
+       }
+       assert_ne!(feerate_3, 0);
+
+       nodes[1].node.get_and_clear_pending_events();
+       nodes[1].node.get_and_clear_pending_msg_events();
+}
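
For reference, the fee math this test repeats three times, factored into helpers: each one-output penalty tx's fee is its claimed input sum minus its output value, its feerate is expressed in sats per 1000 weight units, and every RBF bump must beat the previous feerate by at least 25% (checked in integer arithmetic, as above):

```rust
/// Feerate (sats per 1000 weight units) implied by a one-output penalty tx
/// claiming `input_sum` sats across its inputs.
fn penalty_feerate(input_sum: u64, output_value: u64, weight: u64) -> u64 {
	(input_sum - output_value) * 1000 / weight
}

/// The test's 25% bump heuristic, scaled by 100 to avoid floating point.
fn is_valid_bump(new_feerate: u64, old_feerate: u64) -> bool {
	new_feerate * 100 >= old_feerate * 125
}
```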