Enforce MINIMALIF-compliant witness for timeout tx
diff --git a/lightning/src/ln/onchaintx.rs b/lightning/src/ln/onchaintx.rs
index 7bacc91a441a6113fe263d3caa45ecab3ec0485f..40d9f18abb71efc445acb26218a6c4e89a3cc99b 100644
@@ -14,10 +14,10 @@ use secp256k1::Secp256k1;
 use secp256k1;
 
 use ln::msgs::DecodeError;
-use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial};
+use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
 use ln::chan_utils::HTLCType;
 use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
-use chain::keysinterface::SpendableOutputDescriptor;
+use chain::keysinterface::ChannelKeys;
 use util::logger::Logger;
 use util::ser::{ReadableArgs, Readable, Writer, Writeable};
 use util::byte_utils;
@@ -139,10 +139,11 @@ macro_rules! subtract_high_prio_fee {
 
 /// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and
 /// do RBF bumping if possible.
-#[derive(Clone)]
-pub struct OnchainTxHandler {
+pub struct OnchainTxHandler<ChanSigner: ChannelKeys> {
        destination_script: Script,
 
+       key_storage: ChanSigner,
+
        // Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
        // it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
        // another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in the flight for the
@@ -176,10 +177,12 @@ pub struct OnchainTxHandler {
        logger: Arc<Logger>
 }
 
-impl Writeable for OnchainTxHandler {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+impl<ChanSigner: ChannelKeys + Writeable> OnchainTxHandler<ChanSigner> {
+       pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
                self.destination_script.write(writer)?;
 
+               self.key_storage.write(writer)?;
+
                writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
                for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
                        ancestor_claim_txid.write(writer)?;
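
The hunks above make the handler generic over the channel signer and persist that signer alongside the rest of the handler's state. A standalone sketch of the pattern, with illustrative names rather than the crate's actual `ChannelKeys`/`Writeable` traits:

```rust
use std::io::{self, Write};

// Illustrative stand-in for the ChannelKeys + Writeable bounds used above.
trait SignerLike {
    fn serialize_to<W: Write>(&self, w: &mut W) -> io::Result<()>;
}

// The handler owns its per-channel signer so it can later produce signatures itself,
// and the signer is written out right after the destination script, mirroring the
// ordering in the write() hunk above.
struct HandlerSketch<S: SignerLike> {
    destination_script: Vec<u8>,
    key_storage: S,
}

impl<S: SignerLike> HandlerSketch<S> {
    fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        w.write_all(&(self.destination_script.len() as u64).to_be_bytes())?;
        w.write_all(&self.destination_script)?;
        self.key_storage.serialize_to(w)
    }
}
```
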
@@ -215,10 +218,12 @@ impl Writeable for OnchainTxHandler {
        }
 }
 
-impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
+impl<ChanSigner: ChannelKeys + Readable> ReadableArgs<Arc<Logger>> for OnchainTxHandler<ChanSigner> {
        fn read<R: ::std::io::Read>(reader: &mut R, logger: Arc<Logger>) -> Result<Self, DecodeError> {
                let destination_script = Readable::read(reader)?;
 
+               let key_storage = Readable::read(reader)?;
+
                let pending_claim_requests_len: u64 = Readable::read(reader)?;
                let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
                for _ in 0..pending_claim_requests_len {
@@ -264,6 +269,7 @@ impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
 
                Ok(OnchainTxHandler {
                        destination_script,
+                       key_storage,
                        claimable_outpoints,
                        pending_claim_requests,
                        onchain_events_waiting_threshold_conf,
@@ -273,10 +279,14 @@ impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
        }
 }
 
-impl OnchainTxHandler {
-       pub(super) fn new(destination_script: Script, logger: Arc<Logger>) -> Self {
+impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
+       pub(super) fn new(destination_script: Script, keys: ChanSigner, logger: Arc<Logger>) -> Self {
+
+               let key_storage = keys;
+
                OnchainTxHandler {
                        destination_script,
+                       key_storage,
                        pending_claim_requests: HashMap::new(),
                        claimable_outpoints: HashMap::new(),
                        onchain_events_waiting_threshold_conf: HashMap::new(),
@@ -316,6 +326,11 @@ impl OnchainTxHandler {
                tx_weight
        }
 
+       /// In LN, the outputs we claim are time-sensitive: we have to spend them before some timelock expires. When we detect an in-channel
+       /// output, we generate a first version of a claim tx and associate a height timer with it. A height timer is an absolute block
+       /// height at which, once reached, we should generate a new bumped "version" of the claim tx, to make sure we safely claim the
+       /// outputs before our counterparty can. If the timelock expires soon, the height timer is scaled down accordingly, increasing the
+       /// bump frequency and thus our odds of success.
        fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
                if timelock_expiration <= current_height + 3 {
                        return current_height + 1
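
The only branch visible in this hunk is the `<= 3` case; a minimal sketch of the scaling policy the new comment describes (the fraction below is illustrative, not `get_height_timer`'s actual constants):

```rust
// Sketch: the closer the timelock expiration, the sooner the next bump attempt is
// scheduled, down to re-bumping at every block once the deadline is (almost) reached.
fn height_timer_sketch(current_height: u32, timelock_expiration: u32) -> u32 {
    let blocks_left = timelock_expiration.saturating_sub(current_height);
    if blocks_left <= 3 {
        current_height + 1               // deadline imminent: bump at the next block
    } else {
        current_height + blocks_left / 2 // otherwise wait a fraction of the remaining delay
    }
}
```
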
@@ -390,13 +405,15 @@ impl OnchainTxHandler {
                        }
                }
 
+               // Compute the new height timer to decide when we need to regenerate a bumped version of the claim tx (if it
+               // hasn't confirmed yet, or doesn't have enough reorg-safe depth on top of it).
                let new_timer = Self::get_height_timer(height, cached_claim_datas.soonest_timelock);
                let mut inputs_witnesses_weight = 0;
                let mut amt = 0;
                for per_outp_material in cached_claim_datas.per_input_material.values() {
                        match per_outp_material {
-                               &InputMaterial::Revoked { ref script, ref is_htlc, ref amount, .. } => {
-                                       inputs_witnesses_weight += Self::get_witnesses_weight(if !is_htlc { &[InputDescriptors::RevokedOutput] } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::OfferedHTLC) { &[InputDescriptors::RevokedOfferedHTLC] } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::AcceptedHTLC) { &[InputDescriptors::RevokedReceivedHTLC] } else { unreachable!() });
+                               &InputMaterial::Revoked { ref witness_script, ref is_htlc, ref amount, .. } => {
+                                       inputs_witnesses_weight += Self::get_witnesses_weight(if !is_htlc { &[InputDescriptors::RevokedOutput] } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::OfferedHTLC) { &[InputDescriptors::RevokedOfferedHTLC] } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::AcceptedHTLC) { &[InputDescriptors::RevokedReceivedHTLC] } else { unreachable!() });
                                        amt += *amount;
                                },
                                &InputMaterial::RemoteHTLC { ref preimage, ref amount, .. } => {
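
The witness-weight tally and `amt` accumulated above feed the fee computation in `subtract_high_prio_fee!`; a hedged sketch of that arithmetic (the rounding here is illustrative, not the macro's exact behavior), with the feerate in satoshis per 1000 weight units as used elsewhere in this file:

```rust
// Sketch: predicted claim-tx weight times the target feerate gives the fee, which is
// subtracted from the total claimed amount to get the claim tx's output value.
fn claim_output_value(claimed_sats: u64, predicted_weight: u64, feerate_sat_per_1000_weight: u64) -> Option<u64> {
    let fee = predicted_weight * feerate_sat_per_1000_weight / 1000;
    claimed_sats.checked_sub(fee) // None if the claim would not even pay for its fee
}
```
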
@@ -429,9 +446,9 @@ impl OnchainTxHandler {
 
                for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
                        match per_outp_material {
-                               &InputMaterial::Revoked { ref script, ref pubkey, ref key, ref is_htlc, ref amount } => {
+                               &InputMaterial::Revoked { ref witness_script, ref pubkey, ref key, ref is_htlc, ref amount } => {
                                        let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
-                                       let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &script, *amount)[..]);
+                                       let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &witness_script, *amount)[..]);
                                        let sig = self.secp_ctx.sign(&sighash, &key);
                                        bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
                                        bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
@@ -440,22 +457,22 @@ impl OnchainTxHandler {
                                        } else {
                                                bumped_tx.input[i].witness.push(vec!(1));
                                        }
-                                       bumped_tx.input[i].witness.push(script.clone().into_bytes());
-                                       log_trace!(self, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if !is_htlc { "to_local" } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::OfferedHTLC) { "offered" } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::AcceptedHTLC) { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
+                                       bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
+                                       log_trace!(self, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if !is_htlc { "to_local" } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::OfferedHTLC) { "offered" } else if HTLCType::scriptlen_to_htlctype(witness_script.len()) == Some(HTLCType::AcceptedHTLC) { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
                                },
-                               &InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount, ref locktime } => {
+                               &InputMaterial::RemoteHTLC { ref witness_script, ref key, ref preimage, ref amount, ref locktime } => {
                                        if !preimage.is_some() { bumped_tx.lock_time = *locktime }; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
                                        let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
-                                       let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &script, *amount)[..]);
+                                       let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &witness_script, *amount)[..]);
                                        let sig = self.secp_ctx.sign(&sighash, &key);
                                        bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
                                        bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
                                        if let &Some(preimage) = preimage {
                                                bumped_tx.input[i].witness.push(preimage.clone().0.to_vec());
                                        } else {
-                                               bumped_tx.input[i].witness.push(vec![0]);
+                                               bumped_tx.input[i].witness.push(vec![]);
                                        }
-                                       bumped_tx.input[i].witness.push(script.clone().into_bytes());
+                                       bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
                                        log_trace!(self, "Going to broadcast Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
                                },
                                &InputMaterial::LocalHTLC { .. } => {
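
The `vec![0]` to `vec![]` change above is the witness fix the commit title refers to: the MINIMALIF policy rule only accepts an empty vector (false) or a single 0x01 byte (true) as the value consumed by OP_IF/OP_NOTIF, so the no-preimage placeholder on the timeout path is now pushed as an empty element rather than a single 0x00 byte. A sketch of the resulting witness stack, with plain byte vectors standing in for the handler's types:

```rust
// Sketch of the remote-HTLC claim witness built above when no preimage is available
// (timeout path): [ <sig || SIGHASH_ALL>, <empty>, <witness_script> ].
fn timeout_path_witness(sig_der: Vec<u8>, witness_script: Vec<u8>) -> Vec<Vec<u8>> {
    let mut sig_with_hashtype = sig_der;
    sig_with_hashtype.push(0x01); // SigHashType::All appended, as in the hunk above
    vec![
        sig_with_hashtype,
        Vec::new(),       // MINIMALIF-compliant "no preimage": empty, not vec![0]
        witness_script,
    ]
}
```
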
@@ -471,31 +488,29 @@ impl OnchainTxHandler {
                Some((new_timer, new_feerate, bumped_tx))
        }
 
-       pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<Vec<(u32, bool, BitcoinOutPoint, InputMaterial)>>, height: u32, broadcaster: B, fee_estimator: F) -> Vec<SpendableOutputDescriptor>
+       pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F)
                where B::Target: BroadcasterInterface,
                      F::Target: FeeEstimator
        {
+               log_trace!(self, "Block at height {} connected with {} claim requests", height, claimable_outpoints.len());
                let mut new_claims = Vec::new();
                let mut aggregated_claim = HashMap::new();
                let mut aggregated_soonest = ::std::u32::MAX;
-               let mut spendable_outputs = Vec::new();
-
-               // Try to aggregate outputs if they're 1) belong to same parent tx, 2) their
-               // timelock expiration isn't imminent (<= CLTV_SHARED_CLAIM_BUFFER).
-               for siblings_outpoints in claimable_outpoints {
-                       for outp in siblings_outpoints {
-                               // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
-                               if let Some(_) = self.claimable_outpoints.get(&outp.2) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", outp.2.txid, outp.2.vout); } else {
-                                       log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", outp.0, height + CLTV_SHARED_CLAIM_BUFFER);
-                                       if outp.0 <= height + CLTV_SHARED_CLAIM_BUFFER || !outp.1 { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
-                                               let mut single_input = HashMap::new();
-                                               single_input.insert(outp.2, outp.3);
-                                               new_claims.push((outp.0, single_input));
-                                       } else {
-                                               aggregated_claim.insert(outp.2, outp.3);
-                                               if outp.0 < aggregated_soonest {
-                                                       aggregated_soonest = outp.0;
-                                               }
+
+               // Try to aggregate outputs if their timelock expiration isn't imminent (i.e. absolute_timelock
+               // > height + CLTV_SHARED_CLAIM_BUFFER) and they are marked as aggregable (no specific nLockTime needed).
+               for req in claimable_outpoints {
+                       // Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
+                       if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
+                               log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
+                               if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
+                                       let mut single_input = HashMap::new();
+                                       single_input.insert(req.outpoint, req.witness_data);
+                                       new_claims.push((req.absolute_timelock, single_input));
+                               } else {
+                                       aggregated_claim.insert(req.outpoint, req.witness_data);
+                                       if req.absolute_timelock < aggregated_soonest {
+                                               aggregated_soonest = req.absolute_timelock;
                                        }
                                }
                        }
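
A condensed sketch of the aggregation decision in the loop above, assuming a `ClaimRequest`-shaped input (`absolute_timelock`, `aggregable`); `CLTV_SHARED_CLAIM_BUFFER` is the constant imported at the top of the file:

```rust
// Sketch: only batch a claim with others if its deadline is comfortably far away and
// the caller marked it as aggregable; otherwise it gets its own single-input claim tx.
fn should_aggregate(absolute_timelock: u32, aggregable: bool, height: u32, cltv_shared_claim_buffer: u32) -> bool {
    aggregable && absolute_timelock > height + cltv_shared_claim_buffer
}
```
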
@@ -516,10 +531,6 @@ impl OnchainTxHandler {
                                        self.claimable_outpoints.insert(k.clone(), (txid, height));
                                }
                                log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
-                               spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
-                                       outpoint: BitcoinOutPoint { txid: tx.txid(), vout: 0 },
-                                       output: tx.output[0].clone(),
-                               });
                                broadcaster.broadcast_transaction(&tx);
                        }
                }
@@ -568,9 +579,11 @@ impl OnchainTxHandler {
                                                if set_equality {
                                                        clean_claim_request_after_safety_delay!();
                                                } else { // If false, generate new claim request with update outpoint set
+                                                       let mut at_least_one_drop = false;
                                                        for input in tx.input.iter() {
                                                                if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
                                                                        claimed_outputs_material.push((input.previous_output, input_material));
+                                                                       at_least_one_drop = true;
                                                                }
                                                                // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
                                                                if claim_material.per_input_material.is_empty() {
@@ -578,7 +591,9 @@ impl OnchainTxHandler {
                                                                }
                                                        }
                                                        //TODO: recompute soonest_timelock to avoid wasting a bit on fees
-                                                       bump_candidates.insert(first_claim_txid_height.0.clone());
+                                                       if at_least_one_drop {
+                                                               bump_candidates.insert(first_claim_txid_height.0.clone());
+                                                       }
                                                }
                                                break; //No need to iterate further, either tx is our or their
                                        } else {
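
The new `at_least_one_drop` flag above only re-bumps a pending claim when the confirmed transaction actually took some of its outpoints (claims whose outpoint set is emptied are separately scheduled for removal after ANTI_REORG_DELAY). A simplified sketch of that guard, using a plain set of outpoint identifiers:

```rust
use std::collections::HashSet;

// Sketch: remove every outpoint the confirmed tx spent from the pending claim's set and
// report whether there was any overlap; only overlapping claims become bump candidates.
fn overlaps_pending_claim(confirmed_inputs: &[String], pending_outpoints: &mut HashSet<String>) -> bool {
    let mut at_least_one_drop = false;
    for prev_out in confirmed_inputs {
        if pending_outpoints.remove(prev_out) {
            at_least_one_drop = true;
        }
    }
    at_least_one_drop
}
```
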
@@ -629,6 +644,7 @@ impl OnchainTxHandler {
                }
 
                // Build, bump and rebroadcast tx accordingly
+               log_trace!(self, "Bumping {} candidates", bump_candidates.len());
                for first_claim_txid in bump_candidates.iter() {
                        if let Some((new_timer, new_feerate)) = {
                                if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
@@ -645,8 +661,6 @@ impl OnchainTxHandler {
                                } else { unreachable!(); }
                        }
                }
-
-               spendable_outputs
        }
 
        pub(super) fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F)