Prevent any update of local commitment transaction once signed
diff --git a/lightning/src/ln/onchaintx.rs b/lightning/src/ln/onchaintx.rs
index 1ec68f5eb4d5d1642c41916565fec7d9bc5a3fa1..d0d1f29e691f5ea5118d0a06b2e5bf4030fd73aa 100644
--- a/lightning/src/ln/onchaintx.rs
+++ b/lightning/src/ln/onchaintx.rs
@@ -15,9 +15,9 @@ use secp256k1;
 
 use ln::msgs::DecodeError;
 use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
-use ln::chan_utils::HTLCType;
+use ln::chan_utils::{HTLCType, LocalCommitmentTransaction};
 use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
-use chain::keysinterface::SpendableOutputDescriptor;
+use chain::keysinterface::ChannelKeys;
 use util::logger::Logger;
 use util::ser::{ReadableArgs, Readable, Writer, Writeable};
 use util::byte_utils;
@@ -139,9 +139,13 @@ macro_rules! subtract_high_prio_fee {
 
 /// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts them
 /// and does RBF bumping if possible.
-#[derive(Clone)]
-pub struct OnchainTxHandler {
+pub struct OnchainTxHandler<ChanSigner: ChannelKeys> {
        destination_script: Script,
+       funding_redeemscript: Script,
+       local_commitment: Option<LocalCommitmentTransaction>,
+       prev_local_commitment: Option<LocalCommitmentTransaction>,
+
+       key_storage: ChanSigner,
 
        // Used to track claiming requests. If claim tx doesn't confirm before height timer expiration we need to bump
        // it (RBF or CPFP). If an input has been part of an aggregate tx at first claim try, we need to keep it within
@@ -176,9 +180,14 @@ pub struct OnchainTxHandler {
        logger: Arc<Logger>
 }
 
-impl Writeable for OnchainTxHandler {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+impl<ChanSigner: ChannelKeys + Writeable> OnchainTxHandler<ChanSigner> {
+       pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
                self.destination_script.write(writer)?;
+               self.funding_redeemscript.write(writer)?;
+               self.local_commitment.write(writer)?;
+               self.prev_local_commitment.write(writer)?;
+
+               self.key_storage.write(writer)?;
 
                writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
                for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
@@ -215,9 +224,14 @@ impl Writeable for OnchainTxHandler {
        }
 }
 
-impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
+impl<ChanSigner: ChannelKeys + Readable> ReadableArgs<Arc<Logger>> for OnchainTxHandler<ChanSigner> {
        fn read<R: ::std::io::Read>(reader: &mut R, logger: Arc<Logger>) -> Result<Self, DecodeError> {
                let destination_script = Readable::read(reader)?;
+               let funding_redeemscript = Readable::read(reader)?;
+               let local_commitment = Readable::read(reader)?;
+               let prev_local_commitment = Readable::read(reader)?;
+
+               let key_storage = Readable::read(reader)?;
 
                let pending_claim_requests_len: u64 = Readable::read(reader)?;
                let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
@@ -264,6 +278,10 @@ impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
 
                Ok(OnchainTxHandler {
                        destination_script,
+                       funding_redeemscript,
+                       local_commitment,
+                       prev_local_commitment,
+                       key_storage,
                        claimable_outpoints,
                        pending_claim_requests,
                        onchain_events_waiting_threshold_conf,
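
Note that the read side consumes the new fields in exactly the order the write side emits them (destination_script, funding_redeemscript, local_commitment, prev_local_commitment, key_storage, then the claim maps). A minimal round-trip sketch of that invariant; VecWriter here stands in for any Vec-backed util::ser::Writer, and the handler, logger, and InMemoryChannelKeys values are assumptions for illustration:

	// Serialize the handler, then read it back with the same Logger argument.
	let mut w = VecWriter(Vec::new());
	handler.write(&mut w).unwrap();
	let mut r = ::std::io::Cursor::new(&w.0[..]);
	let restored: OnchainTxHandler<InMemoryChannelKeys> =
		ReadableArgs::read(&mut r, logger.clone()).unwrap();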
@@ -273,10 +291,17 @@ impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
        }
 }
 
-impl OnchainTxHandler {
-       pub(super) fn new(destination_script: Script, logger: Arc<Logger>) -> Self {
+impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
+       pub(super) fn new(destination_script: Script, keys: ChanSigner, funding_redeemscript: Script, logger: Arc<Logger>) -> Self {
+
+               let key_storage = keys;
+
                OnchainTxHandler {
                        destination_script,
+                       funding_redeemscript,
+                       local_commitment: None,
+                       prev_local_commitment: None,
+                       key_storage,
                        pending_claim_requests: HashMap::new(),
                        claimable_outpoints: HashMap::new(),
                        onchain_events_waiting_threshold_conf: HashMap::new(),
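
For reference, a hedged sketch of the new constructor signature in use; InMemoryChannelKeys stands in for any ChannelKeys implementation, and the script and logger bindings are placeholders, not the actual call site:

	// Hypothetical construction, e.g. when the channel's monitor is set up:
	let onchain_tx_handler = OnchainTxHandler::new(
		destination_script,    // script that swept claims pay to
		chan_keys,             // ChanSigner: per-channel key material
		funding_redeemscript,  // 2-of-2 funding output redeemscript
		logger.clone(),
	);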
@@ -460,7 +485,7 @@ impl OnchainTxHandler {
                                        if let &Some(preimage) = preimage {
                                                bumped_tx.input[i].witness.push(preimage.clone().0.to_vec());
                                        } else {
-                                               bumped_tx.input[i].witness.push(vec![0]);
+                                               bumped_tx.input[i].witness.push(vec![]);
                                        }
                                        bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
                                        log_trace!(self, "Going to broadcast Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
@@ -478,31 +503,29 @@ impl OnchainTxHandler {
                Some((new_timer, new_feerate, bumped_tx))
        }
 
-       pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<Vec<ClaimRequest>>, height: u32, broadcaster: B, fee_estimator: F) -> Vec<SpendableOutputDescriptor>
+       pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F)
                where B::Target: BroadcasterInterface,
                      F::Target: FeeEstimator
        {
+               log_trace!(self, "Block at height {} connected with {} claim requests", height, claimable_outpoints.len());
                let mut new_claims = Vec::new();
                let mut aggregated_claim = HashMap::new();
                let mut aggregated_soonest = ::std::u32::MAX;
-               let mut spendable_outputs = Vec::new();
-
-               // Try to aggregate outputs if they're 1) belong to same parent tx, 2) their
-               // timelock expiration isn't imminent (<= CLTV_SHARED_CLAIM_BUFFER).
-               for siblings_outpoints in claimable_outpoints {
-                       for req in siblings_outpoints {
-                               // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
-                               if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
-                                       log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
-                                       if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
-                                               let mut single_input = HashMap::new();
-                                               single_input.insert(req.outpoint, req.witness_data);
-                                               new_claims.push((req.absolute_timelock, single_input));
-                                       } else {
-                                               aggregated_claim.insert(req.outpoint, req.witness_data);
-                                               if req.absolute_timelock < aggregated_soonest {
-                                                       aggregated_soonest = req.absolute_timelock;
-                                               }
+
+               // Try to aggregate outputs if their timelock expiration isn't imminent, i.e.
+               // absolute_timelock > height + CLTV_SHARED_CLAIM_BUFFER, and they are marked as aggregable.
+               for req in claimable_outpoints {
+                       // Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
+                       if let Some(_) = self.claimable_outpoints.get(&req.outpoint) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoint.txid, req.outpoint.vout); } else {
+                               log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", req.absolute_timelock, height + CLTV_SHARED_CLAIM_BUFFER);
+                               if req.absolute_timelock <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable { // Don't aggregate if the outpoint's absolute timelock expires soon or it's marked as non-aggregable
+                                       let mut single_input = HashMap::new();
+                                       single_input.insert(req.outpoint, req.witness_data);
+                                       new_claims.push((req.absolute_timelock, single_input));
+                               } else {
+                                       aggregated_claim.insert(req.outpoint, req.witness_data);
+                                       if req.absolute_timelock < aggregated_soonest {
+                                               aggregated_soonest = req.absolute_timelock;
                                        }
                                }
                        }
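
The aggregation branch above reduces to a single predicate; a minimal sketch of the rule, using the ClaimRequest fields shown above (the helper name is hypothetical):

	// A request may share a claim tx with others only if it is explicitly
	// aggregable and its absolute timelock isn't within the shared-claim
	// buffer of the current height.
	fn can_aggregate(req: &ClaimRequest, height: u32) -> bool {
		req.aggregable && req.absolute_timelock > height + CLTV_SHARED_CLAIM_BUFFER
	}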
@@ -523,10 +546,6 @@ impl OnchainTxHandler {
                                        self.claimable_outpoints.insert(k.clone(), (txid, height));
                                }
                                log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
-                               spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
-                                       outpoint: BitcoinOutPoint { txid: tx.txid(), vout: 0 },
-                                       output: tx.output[0].clone(),
-                               });
                                broadcaster.broadcast_transaction(&tx);
                        }
                }
@@ -575,9 +594,11 @@ impl OnchainTxHandler {
                                                if set_equality {
                                                        clean_claim_request_after_safety_delay!();
                                                } else { // If false, generate new claim request with update outpoint set
+                                                       let mut at_least_one_drop = false;
                                                        for input in tx.input.iter() {
                                                                if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
                                                                        claimed_outputs_material.push((input.previous_output, input_material));
+                                                                       at_least_one_drop = true;
                                                                }
                                                                // If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
                                                                if claim_material.per_input_material.is_empty() {
@@ -585,7 +606,9 @@ impl OnchainTxHandler {
                                                                }
                                                        }
                                                        //TODO: recompute soonest_timelock to avoid wasting a bit on fees
-                                                       bump_candidates.insert(first_claim_txid_height.0.clone());
+                                                       if at_least_one_drop {
+                                                               bump_candidates.insert(first_claim_txid_height.0.clone());
+                                                       }
                                                }
                                                break; // No need to iterate further, either tx is ours or theirs
                                        } else {
@@ -636,6 +659,7 @@ impl OnchainTxHandler {
                }
 
                // Build, bump and rebroadcast tx accordingly
+               log_trace!(self, "Bumping {} candidates", bump_candidates.len());
                for first_claim_txid in bump_candidates.iter() {
                        if let Some((new_timer, new_feerate)) = {
                                if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
@@ -652,8 +676,6 @@ impl OnchainTxHandler {
                                } else { unreachable!(); }
                        }
                }
-
-               spendable_outputs
        }
 
        pub(super) fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F)
@@ -702,4 +724,18 @@ impl OnchainTxHandler {
                        self.pending_claim_requests.remove(&req);
                }
        }
+
+       pub(super) fn provide_latest_local_tx(&mut self, tx: LocalCommitmentTransaction) -> Result<(), ()> {
+               // To prevent any unsafe state discrepancy between offchain and onchain, once the local
+               // commitment transaction has been signed due to an event (either block height for an
+               // HTLC-timeout or channel force-closure), don't allow any further update of the local
+               // commitment transaction view, to avoid delivery of the revocation secret to the
+               // counterparty for the aforementioned signed transaction.
+               if let Some(ref local_commitment) = self.local_commitment {
+                       if local_commitment.has_local_sig() { return Err(()) }
+               }
+               self.prev_local_commitment = self.local_commitment.take();
+               self.local_commitment = Some(tx);
+               Ok(())
+       }
 }
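
Finally, a sketch of how a caller might honor the freeze semantics of provide_latest_local_tx; the call site and bindings are illustrative, not the actual channel integration:

	// Offchain state machine proposing an updated local commitment view:
	match onchain_tx_handler.provide_latest_local_tx(new_local_commitment_tx) {
		Ok(()) => {
			// Accepted: the previous view moves to prev_local_commitment
			// until the counterparty's revocation completes.
		},
		Err(()) => {
			// The current local commitment was already signed (force-close or
			// HTLC-timeout), so the view is frozen and no revocation secret
			// for it may ever be released.
		},
	}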