X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmonitor.rs;h=91f81fa573664d6d0ceb64380181ee2d4d5adc86;hb=dd7546ea09b1dafc3ce7ebedb574a2fd3c2c1f14;hp=d17078554b4702c0af0020685c0b58fe2292076e;hpb=343aacc50c73e18ddb1ec52570c5050bdccd09ca;p=rust-lightning

diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
index d1707855..91f81fa5 100644
--- a/lightning/src/ln/channelmonitor.rs
+++ b/lightning/src/ln/channelmonitor.rs
@@ -133,11 +133,19 @@ pub enum ChannelMonitorUpdateErr {
 	TemporaryFailure,
 	/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
 	/// different watchtower and cannot update with all watchtowers that were previously informed
-	/// of this channel). This will force-close the channel in question (which will generate one
-	/// final ChannelMonitorUpdate which must be delivered to at least one ChannelMonitor copy).
+	/// of this channel).
 	///
-	/// Should also be used to indicate a failure to update the local persisted copy of the channel
-	/// monitor.
+	/// Upon receiving this error, ChannelManager will force-close the channel and return at
+	/// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
+	/// least one ChannelMonitor copy. The revocation secret MUST NOT be released and any
+	/// further offchain channel update must be rejected.
+	///
+	/// This failure may also signal a failure to update the local persisted copy of one of
+	/// the channel monitor instances.
+	///
+	/// Note that even when you fail a holder commitment transaction update, you must store the
+	/// update to ensure you can claim from it in case a duplicate copy of this ChannelMonitor
+	/// broadcasts it (e.g. in a distributed channel-monitor deployment).
 	PermanentFailure,
 }
 
@@ -824,6 +832,10 @@ pub struct ChannelMonitor {
 	// Set once we've signed a holder commitment transaction and handed it over to our
 	// OnchainTxHandler. After this is set, no future updates to our holder commitment transactions
 	// may occur, and we fail any such monitor updates.
+	//
+	// If an update is rejected because a holder commitment transaction has already been signed
+	// locally, we nevertheless store the update's content so that we can track a concurrent
+	// broadcast by another remote monitor which is out-of-order with respect to our block view.
 	holder_tx_signed: bool,
 
 	// We simply modify last_block_hash in Channel's block_connected so that serialization is
@@ -888,6 +900,11 @@ pub trait ManyChannelMonitor: Send + Sync {
 	///
 	/// Any spends of outputs which should have been registered which aren't passed to
 	/// ChannelMonitors via block_connected may result in FUNDS LOSS.
+	///
+	/// In case of a distributed watchtower deployment, the new version must be written to disk
+	/// even if an Err is returned, as the state may have been stored but rejected due to a block
+	/// forcing a commitment broadcast. This storage is used to claim outputs of a rejected state
+	/// which was confirmed onchain by another watchtower lagging behind on block processing.
 	fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
 
 	/// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated
@@ -1167,12 +1184,7 @@ impl ChannelMonitor {
 			feerate_per_kw: initial_holder_commitment_tx.feerate_per_kw,
 			htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions
 		};
-		// Returning a monitor error before updating tracking points means in case of using
-		// a concurrent watchtower implementation for same channel, if this one doesn't
-		// reject update as we do, you MAY have the latest holder valid commitment tx onchain
-		// for which you want to spend outputs. We're NOT robust again this scenario right
-		// now but we should consider it later.
-		onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx).unwrap();
+		onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
 
 		ChannelMonitor {
 			latest_update_id: 0,
@@ -1327,9 +1339,6 @@ impl ChannelMonitor {
 	/// up-to-date as our holder commitment transaction is updated.
 	/// Panics if set_on_holder_tx_csv has never been called.
 	pub(super) fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
-		if self.holder_tx_signed {
-			return Err(MonitorUpdateError("A holder commitment tx has already been signed, no new holder commitment txn can be sent to our counterparty"));
-		}
 		let txid = commitment_tx.txid();
 		let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64;
 		let locktime = commitment_tx.unsigned_tx.lock_time as u64;
@@ -1343,17 +1352,13 @@ impl ChannelMonitor {
 			feerate_per_kw: commitment_tx.feerate_per_kw,
 			htlc_outputs: htlc_outputs,
 		};
-		// Returning a monitor error before updating tracking points means in case of using
-		// a concurrent watchtower implementation for same channel, if this one doesn't
-		// reject update as we do, you MAY have the latest holder valid commitment tx onchain
-		// for which you want to spend outputs. We're NOT robust again this scenario right
-		// now but we should consider it later.
-		if let Err(_) = self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx) {
-			return Err(MonitorUpdateError("Holder commitment signed has already been signed, no further update of LOCAL commitment transaction is allowed"));
-		}
+		self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx);
 		self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
 		mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
 		self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
+		if self.holder_tx_signed {
+			return Err(MonitorUpdateError("A holder commitment tx has already been signed, rejecting update"));
+		}
 		Ok(())
 	}
 
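The following sketch, which is not part of the patch above, illustrates the persist-even-on-Err ordering the new update_monitor documentation requires of a distributed watchtower. All names here (WatchtowerStore, MonitorUpdate, apply_update, the u64 channel key, the Vec<u8> payload) are hypothetical stand-ins rather than rust-lightning APIs; only the ordering is taken from the doc comments in the diff.

	// Minimal, self-contained sketch: write the update to storage before
	// applying it, and keep the stored copy even when the monitor rejects it.
	use std::collections::HashMap;
	use std::sync::Mutex;

	#[derive(Clone)]
	pub struct MonitorUpdate {
		pub update_id: u64,
		pub payload: Vec<u8>,
	}

	pub enum UpdateErr {
		TemporaryFailure,
		PermanentFailure,
	}

	#[derive(Default)]
	pub struct WatchtowerStore {
		// Stand-in for durable storage, keyed by channel.
		on_disk: Mutex<HashMap<u64, Vec<MonitorUpdate>>>,
	}

	impl WatchtowerStore {
		/// Persist first, then try to apply. If the monitor rejects the update
		/// (e.g. a block already forced a holder commitment broadcast, so
		/// holder_tx_signed is set), the stored copy is deliberately kept: a
		/// watchtower lagging behind on block processing may still confirm the
		/// rejected state onchain, and the update's contents are then needed
		/// to claim its outputs.
		pub fn update_monitor(&self, channel: u64, update: MonitorUpdate) -> Result<(), UpdateErr> {
			self.on_disk.lock().unwrap().entry(channel).or_default().push(update.clone());
			self.apply_update(channel, &update)
		}

		// Stand-in for ChannelMonitor's own update handling; an empty payload
		// here models an update arriving after a holder commitment was signed.
		fn apply_update(&self, _channel: u64, update: &MonitorUpdate) -> Result<(), UpdateErr> {
			if update.payload.is_empty() {
				return Err(UpdateErr::PermanentFailure);
			}
			Ok(())
		}
	}

A real implementation would replace apply_update with a call into the updated ChannelMonitor and write to actual durable storage; the point carried over from the documentation above is only that the write happens, and is retained, regardless of the Result.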