Fail back dust HTLC of local commitment tx after enough confirmations
authorAntoine Riard <ariard@student.42.fr>
Thu, 7 Feb 2019 01:28:55 +0000 (20:28 -0500)
committerAntoine Riard <ariard@student.42.fr>
Wed, 17 Jul 2019 19:26:33 +0000 (15:26 -0400)
Add test_failure_delay_dust_htlc_local_commitment and
test_no_failure_dust_htlc_local_commitment

Move some bits of check_spend_remote as we need to fail dust HTLCs
which can be spread across both the previous and latest local commitment txn

src/ln/channelmonitor.rs
src/ln/functional_tests.rs
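
The mechanism added here is a small "buffer now, fail later" queue: when a local commitment transaction confirms, dust HTLCs are not failed back immediately but are parked in htlc_updated_waiting_threshold_conf under the height at which HTLC_FAIL_ANTI_REORG_DELAY confirmations will have elapsed. The following is a minimal standalone sketch of that pattern, not the real ChannelMonitor code; the constant value and the Source/Update/PaymentHash aliases are simplified stand-ins for the actual types.

use std::collections::HashMap;
use std::collections::hash_map::Entry;

// Stand-in value for illustration only; the real constant lives in channelmonitor.rs.
const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;

// Simplified placeholders for HTLCSource, an optional preimage update, and the payment hash.
type Source = u64;
type Update = Option<[u8; 32]>;
type PaymentHash = [u8; 32];

#[derive(Default)]
struct DustHtlcBuffer {
    // Keyed by the height at which the buffered failures may safely be surfaced.
    waiting_threshold_conf: HashMap<u32, Vec<(Source, Update, PaymentHash)>>,
}

impl DustHtlcBuffer {
    // Queue an HTLC update once the triggering commitment tx confirms at `height`.
    fn wait_threshold_conf(&mut self, height: u32, source: Source, update: Update, payment_hash: PaymentHash) {
        let target = height + HTLC_FAIL_ANTI_REORG_DELAY - 1;
        match self.waiting_threshold_conf.entry(target) {
            Entry::Occupied(mut entry) => {
                let e = entry.get_mut();
                // Keep at most one pending update per HTLC source.
                e.retain(|upd| upd.0 != source);
                e.push((source, update, payment_hash));
            },
            Entry::Vacant(entry) => { entry.insert(vec![(source, update, payment_hash)]); },
        }
    }

    // On each connected block, drain any entries whose target height has been reached.
    fn block_connected(&mut self, height: u32) -> Vec<(Source, Update, PaymentHash)> {
        self.waiting_threshold_conf.remove(&height).unwrap_or_default()
    }
}

fn main() {
    let mut buf = DustHtlcBuffer::default();
    buf.wait_threshold_conf(100, 1, None, [0x42; 32]);
    assert!(buf.block_connected(100).is_empty());
    assert_eq!(buf.block_connected(100 + HTLC_FAIL_ANTI_REORG_DELAY - 1).len(), 1);
}

The drain side of the real queue happens at block-connection time in the monitor and is not part of this diff; the hunk below only fills the buffer.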

index 59dffa6e90bf39809c89fea0c8da05081b46a1b0..bb6b36323218f07954015f5f21de24648a29c0f7 100644 (file)
@@ -1738,42 +1738,90 @@ impl ChannelMonitor {
        /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
        /// revoked using data in local_claimable_outpoints.
        /// Should not be used if check_spend_revoked_transaction succeeds.
-       fn check_spend_local_transaction(&self, tx: &Transaction, _height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, (Sha256dHash, Vec<TxOut>)) {
+       fn check_spend_local_transaction(&mut self, tx: &Transaction, height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, (Sha256dHash, Vec<TxOut>)) {
                let commitment_txid = tx.txid();
-               // TODO: If we find a match here we need to fail back HTLCs that weren't included in the
-               // broadcast commitment transaction, either because they didn't meet dust or because they
-               // weren't yet included in our commitment transaction(s).
+               let mut local_txn = Vec::new();
+               let mut spendable_outputs = Vec::new();
+               let mut watch_outputs = Vec::new();
+
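+               // Buffer an HTLC failure until the triggering commitment tx has had HTLC_FAIL_ANTI_REORG_DELAY confirmations, keeping at most one pending update per HTLC source.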
+               macro_rules! wait_threshold_conf {
+                       ($height: expr, $source: expr, $update: expr, $commitment_tx: expr, $payment_hash: expr) => {
+                               log_info!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, $height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
+                               match self.htlc_updated_waiting_threshold_conf.entry($height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                                       hash_map::Entry::Occupied(mut entry) => {
+                                               let e = entry.get_mut();
+                                               e.retain(|ref update| update.0 != $source);
+                                               e.push(($source, $update, $payment_hash));
+                                       }
+                                       hash_map::Entry::Vacant(entry) => {
+                                               entry.insert(vec![($source, $update, $payment_hash)]);
+                                       }
+                               }
+                       }
+               }
+
+               macro_rules! append_onchain_update {
+                       ($updates: expr) => {
+                               local_txn.append(&mut $updates.0);
+                               spendable_outputs.append(&mut $updates.1);
+                               watch_outputs.append(&mut $updates.2);
+                       }
+               }
+
+               // The set of HTLCs may differ between the latest and previous local commitment txn; if either hits the chain, make sure we fail all relevant HTLCs backward
+               let mut is_local_tx = false;
+
                if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
                        if local_tx.txid == commitment_txid {
+                               is_local_tx = true;
                                log_trace!(self, "Got latest local commitment tx broadcast, searching for available HTLCs to claim");
                                match self.key_storage {
                                        Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
-                                               let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key));
-                                               return (local_txn, spendable_outputs, (commitment_txid, watch_outputs));
+                                               append_onchain_update!(self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key)));
                                        },
                                        Storage::Watchtower { .. } => {
-                                               let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, &None, &None);
-                                               return (local_txn, spendable_outputs, (commitment_txid, watch_outputs));
+                                               append_onchain_update!(self.broadcast_by_local_state(local_tx, &None, &None));
                                        }
                                }
                        }
                }
                if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
                        if local_tx.txid == commitment_txid {
+                               is_local_tx = true;
                                log_trace!(self, "Got previous local commitment tx broadcast, searching for available HTLCs to claim");
                                match self.key_storage {
                                        Storage::Local { ref delayed_payment_base_key, ref prev_latest_per_commitment_point, .. } => {
-                                               let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key));
-                                               return (local_txn, spendable_outputs, (commitment_txid, watch_outputs));
+                                               append_onchain_update!(self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key)));
                                        },
                                        Storage::Watchtower { .. } => {
-                                               let (local_txn, spendable_outputs, watch_outputs) = self.broadcast_by_local_state(local_tx, &None, &None);
-                                               return (local_txn, spendable_outputs, (commitment_txid, watch_outputs));
+                                               append_onchain_update!(self.broadcast_by_local_state(local_tx, &None, &None));
                                        }
                                }
                        }
                }
-               (Vec::new(), Vec::new(), (commitment_txid, Vec::new()))
+
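+               // Dust HTLCs have no output in the commitment tx (transaction_output_index is None); once the commitment tx is on-chain they can only be resolved by failing them backward after the anti-reorg delay.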
+               macro_rules! fail_dust_htlcs_after_threshold_conf {
+                       ($local_tx: expr) => {
+                               for &(ref htlc, _, ref source) in &$local_tx.htlc_outputs {
+                                       if htlc.transaction_output_index.is_none() {
+                                               if let &Some(ref source) = source {
+                                                       wait_threshold_conf!(height, source.clone(), None, "latest", htlc.payment_hash.clone());
+                                               }
+                                       }
+                               }
+                       }
+               }
+
+               if is_local_tx {
+                       if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+                               fail_dust_htlcs_after_threshold_conf!(local_tx);
+                       }
+                       if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+                               fail_dust_htlcs_after_threshold_conf!(local_tx);
+                       }
+               }
+
+               (local_txn, spendable_outputs, (commitment_txid, watch_outputs))
        }
 
        /// Generate a spendable output event when closing_transaction get registered onchain.
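
The hunks above key everything off htlc.transaction_output_index.is_none(): a dust HTLC is one whose value is below the dust limit, so it gets no output of its own in the commitment transaction and there is nothing on-chain to claim or time out; failing it backward after the anti-reorg delay is the only way to resolve it. Below is a minimal sketch of that classification, using a simplified stand-in for the real HTLCOutputInCommitment with only the fields relevant here.

// Simplified stand-in; the real struct also carries the payment hash and expiry, among other fields.
struct HTLCOutputInCommitment {
    amount_msat: u64,
    // None when the HTLC was trimmed as dust and has no commitment tx output.
    transaction_output_index: Option<u32>,
}

fn is_dust(htlc: &HTLCOutputInCommitment) -> bool {
    htlc.transaction_output_index.is_none()
}

fn main() {
    let dust = HTLCOutputInCommitment { amount_msat: 546_000, transaction_output_index: None };
    let non_dust = HTLCOutputInCommitment { amount_msat: 1_000_000_000, transaction_output_index: Some(0) };
    assert!(dust.amount_msat < non_dust.amount_msat);
    assert!(is_dust(&dust));
    assert!(!is_dust(&non_dust));
}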
index 4b58e3b29e6a8587cdac27e19718860c05273015..0b8ce98da8ef187fb3a3a431ad2599a6e791332c 100644 (file)
@@ -27,7 +27,7 @@ use bitcoin::util::bip143;
 use bitcoin::util::address::Address;
 use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
 use bitcoin::blockdata::block::{Block, BlockHeader};
-use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType};
+use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType, OutPoint as BitcoinOutPoint};
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
@@ -5513,3 +5513,143 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 
        check_added_monitors!(nodes[1], 1);
 }
+
+fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
+       // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment tx) reaches HTLC_FAIL_ANTI_REORG_DELAY confirmations.
+       // We can have at most two valid local commitment txn, so both cases must be covered, and both txn must be checked to get them all, as
+       // an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote RAA.
+
+       let nodes = create_network(2);
+       let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+       let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+
+       // We route 2 dust-HTLCs between A and B
+       let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+       // Cache one local commitment tx as previous
+       let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+       // Fail one HTLC to prune it from the will-be-latest local commitment tx
+       assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
+       check_added_monitors!(nodes[1], 0);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+
+       let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]).unwrap();
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       // Cache one local commitment tx as latest
+       let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       match events[0] {
+               MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
+                       assert_eq!(node_id, nodes[1].node.get_our_node_id());
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               MessageSendEvent::UpdateHTLCs { node_id, .. } => {
+                       assert_eq!(node_id, nodes[1].node.get_our_node_id());
+               },
+               _ => panic!("Unexpected event"),
+       }
+
+       assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
+       // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       if announce_latest {
+               nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_last_commitment_tx[0]], &[1; 1]);
+       } else {
+               nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_prev_commitment_tx[0]], &[1; 1]);
+       }
+
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+
+       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+       connect_blocks(&nodes[0].chain_monitor, HTLC_FAIL_ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+       let events = nodes[0].node.get_and_clear_pending_events();
+       // Only 2 PaymentFailed events should show up; the over-dust HTLC has to be failed via its timeout tx
+       assert_eq!(events.len(), 2);
+       let mut first_failed = false;
+       for event in events {
+               match event {
+                       Event::PaymentFailed { payment_hash, .. } => {
+                               if payment_hash == payment_hash_1 {
+                                       assert!(!first_failed);
+                                       first_failed = true;
+                               } else {
+                                       assert_eq!(payment_hash, payment_hash_2);
+                               }
+                       }
+                       _ => panic!("Unexpected event"),
+               }
+       }
+}
+
+#[test]
+fn test_failure_delay_dust_htlc_local_commitment() {
+       do_test_failure_delay_dust_htlc_local_commitment(true);
+       do_test_failure_delay_dust_htlc_local_commitment(false);
+}
+
+#[test]
+fn test_no_failure_dust_htlc_local_commitment() {
+       // Transaction filters for failing back dust HTLCs based on local commitment txn info have been
+       // prone to error; we check here that a dummy transaction doesn't fail them.
+
+       let nodes = create_network(2);
+       let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+       // Rebalance a bit
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+       let as_dust_limit = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+       let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+
+       // We route 2 dust-HTLCs between A and B
+       let (preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (preimage_2, _) = route_payment(&nodes[1], &[&nodes[0]], as_dust_limit*1000);
+
+       // Build a dummy invalid transaction trying to spend a commitment tx
+       let input = TxIn {
+               previous_output: BitcoinOutPoint { txid: chan.3.txid(), vout: 0 },
+               script_sig: Script::new(),
+               sequence: 0,
+               witness: Vec::new(),
+       };
+
+       let outp = TxOut {
+               script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
+               value: 10000,
+       };
+
+       let dummy_tx = Transaction {
+               version: 2,
+               lock_time: 0,
+               input: vec![input],
+               output: vec![outp]
+       };
+
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       nodes[0].chan_monitor.simple_monitor.block_connected(&header, 1, &[&dummy_tx], &[1;1]);
+       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+       assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
+       // We connect a few more blocks to check everything is all right
+       connect_blocks(&nodes[0].chain_monitor, 20, 1, true, header.bitcoin_hash());
+       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+       assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
+
+       claim_payment(&nodes[0], &vec!(&nodes[1])[..], preimage_1);
+       claim_payment(&nodes[1], &vec!(&nodes[0])[..], preimage_2);
+}