Update monitor with preimage after channel close

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 3adf3e0e70528d64985005d217730a4fd44a456e..1e0e7fef13a91bdc5e1db4b178f5685c324d4ae0 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -57,21 +57,36 @@ use std::io::Error;
 
 /// An update generated by the underlying Channel itself which contains some new information the
 /// ChannelMonitor should be made aware of.
-#[cfg_attr(test, derive(PartialEq))]
+#[cfg_attr(any(test, feature = "_test_utils"), derive(PartialEq))]
 #[derive(Clone)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
        pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
        /// The sequence number of this update. Updates *must* be replayed in-order according to this
        /// sequence number (and updates may panic if they are not). The update_id values are strictly
-       /// increasing and increase by one for each new update.
+       /// increasing and increase by one for each new update, with one exception specified below.
        ///
        /// This sequence number is also used to track up to which points updates which returned
        /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
        /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
+       ///
+       /// The only instance where update_id values are not strictly increasing is the case where we
+       /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
+       /// its docs for more details.
+       ///
+       /// [`CLOSED_CHANNEL_UPDATE_ID`]: constant.CLOSED_CHANNEL_UPDATE_ID.html
        pub update_id: u64,
 }
 
+/// If:
+///    (1) a channel has been force closed and
+///    (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
+///        this channel's (the backward link's) broadcasted commitment transaction
+/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
+/// with the update providing said payment preimage. No other update types are allowed after
+/// force-close.
+pub const CLOSED_CHANNEL_UPDATE_ID: u64 = std::u64::MAX;
+
 impl Writeable for ChannelMonitorUpdate {
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
                self.update_id.write(w)?;
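
Because the update-ID rule above is easy to get wrong, here is a minimal, self-contained sketch of it using stand-in types (the real `ChannelMonitorUpdate` and `ChannelMonitorUpdateStep` are defined in this file and are mostly crate-private): IDs normally increase by exactly one, and the sentinel is accepted only for a single payment-preimage step. The enforcing code in `ChannelMonitor::update_monitor` appears later in this patch.

```rust
// Stand-in sketch of the update-ID rule; `Update`/`Step` are simplified
// placeholders for ChannelMonitorUpdate/ChannelMonitorUpdateStep.
const CLOSED_CHANNEL_UPDATE_ID: u64 = std::u64::MAX;

enum Step { PaymentPreimage([u8; 32]), Other }

struct Update { update_id: u64, steps: Vec<Step> }

fn validate(latest_update_id: u64, update: &Update) {
	if update.update_id == CLOSED_CHANNEL_UPDATE_ID {
		// Post-force-close: exactly one step, and it must provide a preimage.
		assert_eq!(update.steps.len(), 1);
		assert!(matches!(update.steps[0], Step::PaymentPreimage(_)));
	} else {
		// Normal operation: update IDs are strictly increasing by one.
		assert_eq!(update.update_id, latest_update_id + 1);
	}
}
```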
@@ -95,7 +110,7 @@ impl Readable for ChannelMonitorUpdate {
 }
 
 /// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub enum ChannelMonitorUpdateErr {
        /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
        /// our state failed, but is expected to succeed at some point in the future).
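
The added `Debug` derive is caller-facing: `Result::unwrap`/`expect` and `{:?}` formatting on a persistence result require the error type to implement it. A tiny stand-alone illustration, using a stand-in enum whose variants mirror `ChannelMonitorUpdateErr`:

```rust
// Stand-in enum mirroring ChannelMonitorUpdateErr; deriving Debug is what
// allows unwrap()/expect() and {:?} formatting on persistence results.
#[derive(Clone, Debug)]
enum MonitorUpdateFailure { TemporaryFailure, PermanentFailure }

fn persist_somewhere() -> Result<(), MonitorUpdateFailure> {
	Err(MonitorUpdateFailure::TemporaryFailure)
}

fn main() {
	if let Err(e) = persist_somewhere() {
		eprintln!("monitor persistence failed: {:?}", e); // needs Debug
	}
}
```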
@@ -159,7 +174,7 @@ pub enum ChannelMonitorUpdateErr {
 /// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
 /// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
 /// corrupted.
-/// Contains a human-readable error message.
+/// Contains a developer-readable error message.
 #[derive(Debug)]
 pub struct MonitorUpdateError(pub &'static str);
 
@@ -470,7 +485,7 @@ enum OnchainEvent {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
-#[cfg_attr(test, derive(PartialEq))]
+#[cfg_attr(any(test, feature = "_test_utils"), derive(PartialEq))]
 #[derive(Clone)]
 pub(crate) enum ChannelMonitorUpdateStep {
        LatestHolderCommitmentTXInfo {
@@ -631,7 +646,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
        /// spending. Thus, in order to claim them via revocation key, we track all the counterparty
        /// commitment transactions which we find on-chain, mapping them to the commitment number which
        /// can be used to derive the revocation key and claim the transactions.
-       counterparty_commitment_txn_on_chain: HashMap<Txid, (u64, Vec<Script>)>,
+       counterparty_commitment_txn_on_chain: HashMap<Txid, u64>,
        /// Cache used to make pruning of payment_preimages faster.
        /// Maps payment_hash values to commitment numbers for counterparty transactions for non-revoked
        /// counterparty transactions (ie should remain pretty small).
@@ -666,7 +681,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
        // interface knows about the TXOs that we want to be notified of spends of. We could probably
        // be smart and derive them from the above storage fields, but its much simpler and more
        // Obviously Correct (tm) if we just keep track of them explicitly.
-       outputs_to_watch: HashMap<Txid, Vec<Script>>,
+       outputs_to_watch: HashMap<Txid, Vec<(u32, Script)>>,
 
        #[cfg(test)]
        pub onchain_tx_handler: OnchainTxHandler<ChanSigner>,
@@ -696,7 +711,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
        secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
 }
 
-#[cfg(any(test, feature = "fuzztarget"))]
+#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
 /// Used only in testing and fuzztarget to check serialization roundtrips don't change the
 /// underlying object
 impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
@@ -746,7 +761,7 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
        /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
        /// returned block hash and the current chain and then reconnecting blocks to get to the
        /// best chain) upon deserializing the object!
-       pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+       pub fn serialize_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
                //TODO: We still write out all the serialization here manually instead of using the fancy
                //serialization framework we have, we should migrate things over to it.
                writer.write_all(&[SERIALIZATION_VERSION; 1])?;
@@ -824,13 +839,9 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
                }
 
                writer.write_all(&byte_utils::be64_to_array(self.counterparty_commitment_txn_on_chain.len() as u64))?;
-               for (ref txid, &(commitment_number, ref txouts)) in self.counterparty_commitment_txn_on_chain.iter() {
+               for (ref txid, commitment_number) in self.counterparty_commitment_txn_on_chain.iter() {
                        writer.write_all(&txid[..])?;
-                       writer.write_all(&byte_utils::be48_to_array(commitment_number))?;
-                       (txouts.len() as u64).write(writer)?;
-                       for script in txouts.iter() {
-                               script.write(writer)?;
-                       }
+                       writer.write_all(&byte_utils::be48_to_array(*commitment_number))?;
                }
 
                writer.write_all(&byte_utils::be64_to_array(self.counterparty_hash_commitment_number.len() as u64))?;
@@ -918,10 +929,11 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
                }
 
                (self.outputs_to_watch.len() as u64).write(writer)?;
-               for (txid, output_scripts) in self.outputs_to_watch.iter() {
+               for (txid, idx_scripts) in self.outputs_to_watch.iter() {
                        txid.write(writer)?;
-                       (output_scripts.len() as u64).write(writer)?;
-                       for script in output_scripts.iter() {
+                       (idx_scripts.len() as u64).write(writer)?;
+                       for (idx, script) in idx_scripts.iter() {
+                               idx.write(writer)?;
                                script.write(writer)?;
                        }
                }
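
For reference, a hedged sketch of the count-prefixed (index, script) encoding written above, with scripts as raw byte vectors and fixed-width big-endian integers standing in for the crate's actual `Writeable` impls (the real wire format may differ):

```rust
use std::io::{self, Write};

// Writes a count-prefixed list of (output index, script bytes) pairs, mirroring
// the structure written above. Integer widths/endianness here are illustrative
// assumptions, not the crate's exact wire format.
fn write_idx_scripts<W: Write>(w: &mut W, idx_scripts: &[(u32, Vec<u8>)]) -> io::Result<()> {
	w.write_all(&(idx_scripts.len() as u64).to_be_bytes())?;
	for (idx, script) in idx_scripts {
		w.write_all(&idx.to_be_bytes())?;
		w.write_all(&(script.len() as u64).to_be_bytes())?;
		w.write_all(script)?;
	}
	Ok(())
}

fn main() -> io::Result<()> {
	let mut buf = Vec::new();
	write_idx_scripts(&mut buf, &[(0, vec![0x00, 0x14]), (2, vec![0x51])])?;
	assert!(!buf.is_empty());
	Ok(())
}
```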
@@ -967,7 +979,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
 
                let mut outputs_to_watch = HashMap::new();
-               outputs_to_watch.insert(funding_info.0.txid, vec![funding_info.1.clone()]);
+               outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
 
                ChannelMonitor {
                        latest_update_id: 0,
@@ -985,7 +997,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 
                        counterparty_tx_cache,
                        funding_redeemscript,
-                       channel_value_satoshis: channel_value_satoshis,
+                       channel_value_satoshis,
                        their_cur_revocation_points: None,
 
                        on_holder_tx_csv,
@@ -1133,7 +1145,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        delayed_payment_key: commitment_tx.keys.broadcaster_delayed_payment_key,
                        per_commitment_point: commitment_tx.keys.per_commitment_point,
                        feerate_per_kw: commitment_tx.feerate_per_kw,
-                       htlc_outputs: htlc_outputs,
+                       htlc_outputs,
                };
                self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx);
                self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
@@ -1165,28 +1177,38 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// itself.
        ///
        /// panics if the given update is not the next update by update_id.
-       pub fn update_monitor<B: Deref, L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
+       pub fn update_monitor<B: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
                where B::Target: BroadcasterInterface,
                                        L::Target: Logger,
        {
-               if self.latest_update_id + 1 != updates.update_id {
+               // ChannelMonitor updates may be applied after force close if we receive a
+               // preimage for a broadcasted commitment transaction HTLC output that we'd
+               // like to claim on-chain. If this is the case, we no longer have guaranteed
+               // access to the monitor's update ID, so we use a sentinel value instead.
+               if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
+                       match updates.updates[0] {
+                               ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+                               _ => panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage"),
+                       }
+                       assert_eq!(updates.updates.len(), 1);
+               } else if self.latest_update_id + 1 != updates.update_id {
                        panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
                }
-               for update in updates.updates.drain(..) {
+               for update in updates.updates.iter() {
                        match update {
                                ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
                                        if self.lockdown_from_offchain { panic!(); }
-                                       self.provide_latest_holder_commitment_tx_info(commitment_tx, htlc_outputs)?
+                                       self.provide_latest_holder_commitment_tx_info(commitment_tx.clone(), htlc_outputs.clone())?
                                },
                                ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
-                                       self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
+                                       self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger),
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
                                        self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
                                ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
-                                       self.provide_secret(idx, secret)?,
+                                       self.provide_secret(*idx, *secret)?,
                                ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
                                        self.lockdown_from_offchain = true;
-                                       if should_broadcast {
+                                       if *should_broadcast {
                                                self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
                                        } else {
                                                log_error!(logger, "You have a toxic holder commitment transaction available in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take");
@@ -1213,16 +1235,12 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// transaction), which we must learn about spends of via block_connected().
        ///
        /// (C-not exported) because we have no HashMap bindings
-       pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<Script>> {
+       pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, Script)>> {
                // If we've detected a counterparty commitment tx on chain, we must include it in the set
                // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
                // its trivial to do, double-check that here.
-               for (txid, &(_, ref outputs)) in self.counterparty_commitment_txn_on_chain.iter() {
-                       let watched_outputs = self.outputs_to_watch.get(txid).expect("Counterparty commitment txn which have been broadcast should have outputs registered");
-                       assert_eq!(watched_outputs.len(), outputs.len());
-                       for (watched, output) in watched_outputs.iter().zip(outputs.iter()) {
-                               assert_eq!(watched, output);
-                       }
+               for (txid, _) in self.counterparty_commitment_txn_on_chain.iter() {
+                       self.outputs_to_watch.get(txid).expect("Counterparty commitment txn which have been broadcast should have outputs registered");
                }
                &self.outputs_to_watch
        }
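
A hypothetical caller-side sketch of consuming the returned map: each (vout, script) pair is registered with whatever filtering component decides which transactions to hand back via `block_connected`. The filter type and `register_output` below are placeholders, not APIs from this crate, and plain byte containers stand in for `Txid`/`Script`:

```rust
use std::collections::HashMap;

// Placeholder filter; the real consumer is whatever component selects the
// transactions that will later be passed back into block_connected().
struct DummyFilter;
impl DummyFilter {
	fn register_output(&mut self, _txid: &[u8; 32], _vout: u32, _script_pubkey: &[u8]) {
		// e.g. add (txid, vout) and the script to a BIP 158-style filter set
	}
}

// `outputs_to_watch` mirrors the HashMap<Txid, Vec<(u32, Script)>> shape above.
fn register_watched_outputs(
	filter: &mut DummyFilter,
	outputs_to_watch: &HashMap<[u8; 32], Vec<(u32, Vec<u8>)>>,
) {
	for (txid, idx_scripts) in outputs_to_watch {
		for (vout, script) in idx_scripts {
			filter.register_output(txid, *vout, script);
		}
	}
}
```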
@@ -1272,7 +1290,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// HTLC-Success/HTLC-Timeout transactions.
        /// Return updates for HTLC pending in the channel and failed automatically by the broadcast of
        /// revoked counterparty commitment tx
-       fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+       fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<(u32, TxOut)>)) where L::Target: Logger {
                // Most secp and related errors trying to create keys means we have no hope of constructing
                // a spend transaction...so we return no transactions to broadcast
                let mut claimable_outpoints = Vec::new();
@@ -1327,8 +1345,10 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
                                // We're definitely a counterparty commitment transaction!
                                log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
-                               watch_outputs.append(&mut tx.output.clone());
-                               self.counterparty_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
+                               for (idx, outp) in tx.output.iter().enumerate() {
+                                       watch_outputs.push((idx as u32, outp.clone()));
+                               }
+                               self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
                                macro_rules! check_htlc_fails {
                                        ($txid: expr, $commitment_tx: expr) => {
@@ -1374,8 +1394,10 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        // already processed the block, resulting in the counterparty_commitment_txn_on_chain entry
                        // not being generated by the above conditional. Thus, to be safe, we go ahead and
                        // insert it here.
-                       watch_outputs.append(&mut tx.output.clone());
-                       self.counterparty_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
+                       for (idx, outp) in tx.output.iter().enumerate() {
+                               watch_outputs.push((idx as u32, outp.clone()));
+                       }
+                       self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
                        log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
 
@@ -1464,7 +1486,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        }
 
        /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
-       fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<TxOut>)>) where L::Target: Logger {
+       fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<(u32, TxOut)>)>) where L::Target: Logger {
                let htlc_txid = tx.txid();
                if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
                        return (Vec::new(), None)
@@ -1486,10 +1508,11 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
                let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key,  per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv };
                let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
-               (claimable_outpoints, Some((htlc_txid, tx.output.clone())))
+               let outputs = vec![(0, tx.output[0].clone())];
+               (claimable_outpoints, Some((htlc_txid, outputs)))
        }
 
-       fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Vec<TxOut>, Option<(Script, PublicKey, PublicKey)>) {
+       fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Vec<(u32, TxOut)>, Option<(Script, PublicKey, PublicKey)>) {
                let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len());
                let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
 
@@ -1510,7 +1533,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                                        } else { None },
                                                amount: htlc.amount_msat,
                                }});
-                               watch_outputs.push(commitment_tx.output[transaction_output_index as usize].clone());
+                               watch_outputs.push((transaction_output_index, commitment_tx.output[transaction_output_index as usize].clone()));
                        }
                }
 
@@ -1520,7 +1543,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
        /// revoked using data in holder_claimable_outpoints.
        /// Should not be used if check_spend_revoked_transaction succeeds.
-       fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+       fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<(u32, TxOut)>)) where L::Target: Logger {
                let commitment_txid = tx.txid();
                let mut claim_requests = Vec::new();
                let mut watch_outputs = Vec::new();
@@ -1670,7 +1693,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// [`get_outputs_to_watch`].
        ///
        /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
-       pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
+       pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<(u32, TxOut)>)>
                where B::Target: BroadcasterInterface,
                      F::Target: FeeEstimator,
                                        L::Target: Logger,
@@ -1713,7 +1736,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                                claimable_outpoints.append(&mut new_outpoints);
                                        }
                                } else {
-                                       if let Some(&(commitment_number, _)) = self.counterparty_commitment_txn_on_chain.get(&prevout.txid) {
+                                       if let Some(&commitment_number) = self.counterparty_commitment_txn_on_chain.get(&prevout.txid) {
                                                let (mut new_outpoints, new_outputs_option) = self.check_spend_counterparty_htlc(&tx, commitment_number, height, &logger);
                                                claimable_outpoints.append(&mut new_outpoints);
                                                if let Some(new_outputs) = new_outputs_option {
@@ -1771,9 +1794,23 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                // Determine new outputs to watch by comparing against previously known outputs to watch,
                // updating the latter in the process.
                watch_outputs.retain(|&(ref txid, ref txouts)| {
-                       let output_scripts = txouts.iter().map(|o| o.script_pubkey.clone()).collect();
-                       self.outputs_to_watch.insert(txid.clone(), output_scripts).is_none()
+                       let idx_and_scripts = txouts.iter().map(|o| (o.0, o.1.script_pubkey.clone())).collect();
+                       self.outputs_to_watch.insert(txid.clone(), idx_and_scripts).is_none()
                });
+               #[cfg(test)]
+               {
+                       // If we see a transaction for which we registered outputs previously,
+                       // make sure the registered scriptpubkey at the expected index matches
+                       // the actual transaction output's. We failed this case before #653.
+                       for tx in &txn_matched {
+                               if let Some(outputs) = self.get_outputs_to_watch().get(&tx.txid()) {
+                                       for idx_and_script in outputs.iter() {
+                                               assert!((idx_and_script.0 as usize) < tx.output.len());
+                                               assert_eq!(tx.output[idx_and_script.0 as usize].script_pubkey, idx_and_script.1);
+                                       }
+                               }
+                       }
+               }
                watch_outputs
        }
 
@@ -1821,8 +1858,19 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        fn spends_watched_output(&self, tx: &Transaction) -> bool {
                for input in tx.input.iter() {
                        if let Some(outputs) = self.get_outputs_to_watch().get(&input.previous_output.txid) {
-                               for (idx, _script_pubkey) in outputs.iter().enumerate() {
-                                       if idx == input.previous_output.vout as usize {
+                               for (idx, _script_pubkey) in outputs.iter() {
+                                       if *idx == input.previous_output.vout {
+                                               #[cfg(test)]
+                                               {
+                                                       // If the expected script is a known type, check that the witness
+                                                       // appears to be spending the correct type (ie that the match would
+                                                       // actually succeed in BIP 158/159-style filters).
+                                                       if _script_pubkey.is_v0_p2wsh() {
+                                                               assert_eq!(&bitcoin::Address::p2wsh(&Script::from(input.witness.last().unwrap().clone()), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey);
+                                                       } else if _script_pubkey.is_v0_p2wpkh() {
+                                                               assert_eq!(&bitcoin::Address::p2wpkh(&bitcoin::PublicKey::from_slice(&input.witness.last().unwrap()).unwrap(), bitcoin::Network::Bitcoin).unwrap().script_pubkey(), _script_pubkey);
+                                                       } else { panic!(); }
+                                               }
                                                return true;
                                        }
                                }
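
The switch from positional enumeration over a bare `Vec<Script>` to explicit (vout, script) pairs is the point of this hunk; a self-contained sketch of the resulting lookup, with plain byte types standing in for `Txid`/`Script`:

```rust
use std::collections::HashMap;

struct PrevOut { txid: [u8; 32], vout: u32 }

// Mirrors the check above: match on the registered output index directly,
// rather than on the position of a script in a bare list.
fn spends_watched_output(
	outputs_to_watch: &HashMap<[u8; 32], Vec<(u32, Vec<u8>)>>,
	tx_inputs: &[PrevOut],
) -> bool {
	tx_inputs.iter().any(|input| {
		outputs_to_watch
			.get(&input.txid)
			.map_or(false, |outs| outs.iter().any(|(idx, _script)| *idx == input.vout))
	})
}
```

The old code enumerated the script list and compared positions, which silently assumed every output of the watched transaction was registered, in order; carrying the index explicitly removes that assumption.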
@@ -2100,6 +2148,61 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        }
 }
 
+/// `Persist` defines behavior for persisting channel monitors: this could mean
+/// writing once to disk, and/or uploading to one or more backup services.
+///
+/// Note that for every new monitor, you **must** persist the new `ChannelMonitor`
+/// to disk/backups. And, on every update, you **must** persist either the
+/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, there is risk
+/// of situations such as revoking a transaction, then crashing before this
+/// revocation can be persisted, then unintentionally broadcasting a revoked
+/// transaction and losing money. This is a risk because previous channel states
+/// are toxic, so it's important that whatever channel state is persisted is
+/// kept up-to-date.
+pub trait Persist<Keys: ChannelKeys>: Send + Sync {
+       /// Persist a new channel's data. The data can be stored any way you want, but
+       /// the identifier provided by Rust-Lightning is the channel's outpoint (and
+       /// it is up to you to maintain a correct mapping between the outpoint and the
+       /// stored channel data). Note that you **must** persist every new monitor to
+       /// disk. See the `Persist` trait documentation for more details.
+       ///
+       /// See [`ChannelMonitor::serialize_for_disk`] for writing out a `ChannelMonitor`,
+       /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+       ///
+       /// [`ChannelMonitor::serialize_for_disk`]: struct.ChannelMonitor.html#method.serialize_for_disk
+       /// [`ChannelMonitorUpdateErr`]: enum.ChannelMonitorUpdateErr.html
+       fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
+
+       /// Update one channel's data. The provided `ChannelMonitor` has already
+       /// applied the given update.
+       ///
+       /// Note that on every update, you **must** persist either the
+       /// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See
+       /// the `Persist` trait documentation for more details.
+       ///
+       /// If an implementer chooses to persist the updates only, they need to make
+       /// sure that all the updates are applied to the `ChannelMonitors` *before*
+       /// the set of channel monitors is given to the `ChannelManager`
+       /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
+       /// applying a monitor update to a monitor. If full `ChannelMonitors` are
+       /// persisted, then there is no need to persist individual updates.
+       ///
+       /// Note that there could be a performance tradeoff between persisting complete
+       /// channel monitors on every update vs. persisting only updates and applying
+       /// them in batches. The size of each monitor grows `O(number of state updates)`
+       /// whereas updates are small and `O(1)`.
+       ///
+       /// See [`ChannelMonitor::serialize_for_disk`] for writing out a `ChannelMonitor`,
+       /// [`ChannelMonitorUpdate::write`] for writing out an update, and
+       /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+       ///
+       /// [`ChannelMonitor::update_monitor`]: struct.ChannelMonitor.html#impl-1
+       /// [`ChannelMonitor::serialize_for_disk`]: struct.ChannelMonitor.html#method.serialize_for_disk
+       /// [`ChannelMonitorUpdate::write`]: struct.ChannelMonitorUpdate.html#method.write
+       /// [`ChannelMonitorUpdateErr`]: enum.ChannelMonitorUpdateErr.html
+       fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr>;
+}
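+
To make the contract concrete, here is a deliberately minimal sketch of an implementation that re-serializes the full monitor on every call. Assumptions: the trait and types defined in this file are in scope, the key type also implements `Writeable` (as `serialize_for_disk` requires, per the impl shown earlier in this patch), and `Vec<u8>` satisfies the crate's `Writer` trait via its blanket impl over `std::io::Write`. A real implementation must reach durable storage (and fsync) before returning `Ok(())`.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Illustration only: an in-memory "persister". Real implementations must write
// to disk and/or backups before returning Ok(()), per the trait docs above.
struct InMemoryPersister {
	// funding outpoint -> latest serialized ChannelMonitor bytes
	monitors: Mutex<HashMap<OutPoint, Vec<u8>>>,
}

impl<Keys: ChannelKeys + Writeable> Persist<Keys> for InMemoryPersister {
	fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr> {
		let mut buf = Vec::new();
		data.serialize_for_disk(&mut buf).map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)?;
		self.monitors.lock().unwrap().insert(id, buf);
		Ok(())
	}

	fn update_persisted_channel(&self, id: OutPoint, _update: &ChannelMonitorUpdate, data: &ChannelMonitor<Keys>) -> Result<(), ChannelMonitorUpdateErr> {
		// Persisting the full monitor means the individual update need not be stored.
		self.persist_new_channel(id, data)
	}
}
```

The tradeoff described in the docs applies directly: each call here costs O(monitor size), whereas persisting only the `ChannelMonitorUpdate`s keeps writes small but requires replaying every update, in order, before the monitors are handed back to the `ChannelManager` on restart.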
+
 const MAX_ALLOC_SIZE: usize = 64*1024;
 
 impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor<ChanSigner>) {
@@ -2205,12 +2308,7 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor
                for _ in 0..counterparty_commitment_txn_on_chain_len {
                        let txid: Txid = Readable::read(reader)?;
                        let commitment_number = <U48 as Readable>::read(reader)?.0;
-                       let outputs_count = <u64 as Readable>::read(reader)?;
-                       let mut outputs = Vec::with_capacity(cmp::min(outputs_count as usize, MAX_ALLOC_SIZE / 8));
-                       for _ in 0..outputs_count {
-                               outputs.push(Readable::read(reader)?);
-                       }
-                       if let Some(_) = counterparty_commitment_txn_on_chain.insert(txid, (commitment_number, outputs)) {
+                       if let Some(_) = counterparty_commitment_txn_on_chain.insert(txid, commitment_number) {
                                return Err(DecodeError::InvalidValue);
                        }
                }
@@ -2329,13 +2427,13 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor
                }
 
                let outputs_to_watch_len: u64 = Readable::read(reader)?;
-               let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<Vec<Script>>())));
+               let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<Script>>())));
                for _ in 0..outputs_to_watch_len {
                        let txid = Readable::read(reader)?;
                        let outputs_len: u64 = Readable::read(reader)?;
-                       let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Script>()));
+                       let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<u32>() + mem::size_of::<Script>())));
                        for _ in 0..outputs_len {
-                               outputs.push(Readable::read(reader)?);
+                               outputs.push((Readable::read(reader)?, Readable::read(reader)?));
                        }
                        if let Some(_) = outputs_to_watch.insert(txid, outputs) {
                                return Err(DecodeError::InvalidValue);