From: Antoine Riard
Date: Wed, 11 Mar 2020 19:10:29 +0000 (-0400)
Subject: Fix duplicate adjusted justice tx generation in OnchainTxHandler
X-Git-Tag: v0.0.12~101^2~2
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=0d45ddc9e235eaf756d8e09118daa2e9ee88e639;p=rust-lightning

Fix duplicate adjusted justice tx generation in OnchainTxHandler

An adjusted tx is generated when a previously aggregated claim tx has seen
one of its outpoints partially claimed by a remote tx. To keep claiming the
remaining outpoints, an adjusted claim tx is generated from the leftover
claimable outpoints.

Previously, in case of a block rescan where a partial claim had occurred, we
would generate a duplicate adjusted tx, wrongly inflating the feerate of
subsequent bumps. Now, at rescan, if an input has already been dropped from
a claiming request's outpoints map, we don't regenerate an adjusted tx.
---

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 703c593fb..38b9471e3 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -4182,9 +4182,14 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 	check_closed_broadcast!(nodes[1], false);
 
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-	assert_eq!(node_txn.len(), 4 ); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-timeout, adjusted justice tx, ChannelManager: local commitment tx
+	assert_eq!(node_txn.len(), 4); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-timeout, adjusted justice tx, ChannelManager: local commitment tx
+	assert_eq!(node_txn[0].input.len(), 2);
+	check_spends!(node_txn[0], revoked_local_txn[0]);
+	check_spends!(node_txn[1], chan_1.3);
 	assert_eq!(node_txn[2].input.len(), 1);
 	check_spends!(node_txn[2], revoked_htlc_txn[0]);
+	assert_eq!(node_txn[3].input.len(), 1);
+	check_spends!(node_txn[3], revoked_local_txn[0]);
 
 	// Check B's ChannelMonitor was able to generate the right spendable output descriptor
 	let spend_txn = check_spendable_outputs!(nodes[1], 1);
diff --git a/lightning/src/ln/onchaintx.rs b/lightning/src/ln/onchaintx.rs
index e456b7164..3e985a37f 100644
--- a/lightning/src/ln/onchaintx.rs
+++ b/lightning/src/ln/onchaintx.rs
@@ -573,9 +573,11 @@ impl OnchainTxHandler {
 					if set_equality {
 						clean_claim_request_after_safety_delay!();
 					} else { // If false, generate new claim request with update outpoint set
+						let mut at_least_one_drop = false;
 						for input in tx.input.iter() {
 							if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
 								claimed_outputs_material.push((input.previous_output, input_material));
+								at_least_one_drop = true;
 							}
 							// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
 							if claim_material.per_input_material.is_empty() {
@@ -583,7 +585,9 @@
 						}
 					}
 					//TODO: recompute soonest_timelock to avoid wasting a bit on fees
-					bump_candidates.insert(first_claim_txid_height.0.clone());
+					if at_least_one_drop {
+						bump_candidates.insert(first_claim_txid_height.0.clone());
+					}
 				}
 				break; //No need to iterate further, either tx is our or their
 			} else {
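
Reviewer sketch: the guard added above boils down to "only mark a claim
request as a bump candidate if this confirmation actually removed one of its
outpoints". The standalone model below is not rust-lightning code; the names
ClaimTracker and block_connected and the integer Txid/OutPoint aliases are
hypothetical stand-ins for OnchainTxHandler's claim map (keyed by
first_claim_txid_height) and its per_input_material sets. It only illustrates
why the at_least_one_drop flag makes a block rescan idempotent:

use std::collections::{HashMap, HashSet};

// Hypothetical stand-ins for real txids/outpoints, kept as integers so the
// sketch stays self-contained.
type Txid = u64;
type OutPoint = u64;

struct ClaimTracker {
	// Pending claim requests: first claim txid -> outpoints still to claim.
	pending: HashMap<Txid, HashSet<OutPoint>>,
}

impl ClaimTracker {
	// Process a confirmed transaction spending `spent` outpoints and return
	// the requests that need an adjusted claim tx on the leftover outpoints.
	fn block_connected(&mut self, spent: &[OutPoint]) -> Vec<Txid> {
		let mut bump_candidates = Vec::new();
		for (txid, outpoints) in self.pending.iter_mut() {
			let mut at_least_one_drop = false;
			for o in spent {
				if outpoints.remove(o) {
					at_least_one_drop = true;
				}
			}
			// Only regenerate an adjusted tx if something was actually
			// dropped now; on a rescan the outpoints are already gone, so
			// the request is not flagged a second time. (The real handler
			// also schedules fully-claimed requests for removal after
			// ANTI_REORG_DELAY; that part is elided here.)
			if at_least_one_drop && !outpoints.is_empty() {
				bump_candidates.push(*txid);
			}
		}
		bump_candidates
	}
}

fn main() {
	let mut tracker = ClaimTracker {
		pending: HashMap::from([(1, HashSet::from([10, 11, 12]))]),
	};
	// A remote tx partially claims outpoint 10: request 1 needs one (and
	// only one) adjusted tx for the leftover outpoints 11 and 12.
	assert_eq!(tracker.block_connected(&[10]), vec![1]);
	// Rescanning the same block is a no-op: without the guard this second
	// pass would emit a duplicate adjusted tx and inflate later bump fees.
	assert!(tracker.block_connected(&[10]).is_empty());
}

Without the at_least_one_drop check, the second call above would return
request 1 again, mirroring the duplicate adjusted justice tx that the
tightened assertions in functional_tests.rs now rule out.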