}
}
-/// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum ChannelMonitorUpdateErr {
- /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
- /// our state failed, but is expected to succeed at some point in the future).
- ///
- /// Such a failure will "freeze" a channel, preventing us from revoking old states or
- /// submitting new commitment transactions to the counterparty. Once the update(s) which failed
- /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to
- /// restore the channel to an operational state.
- ///
- /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
- /// you return a TemporaryFailure you must ensure that it is written to disk safely before
- /// writing out the latest ChannelManager state.
- ///
- /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
- /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
- /// to claim it on this channel) and those updates must be applied wherever they can be. At
- /// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should
- /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
- /// the channel which would invalidate previous ChannelMonitors are not made when a channel has
- /// been "frozen".
- ///
- /// Note that even if updates made after TemporaryFailure succeed you must still call
- /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel
- /// operation.
- ///
- /// Note that the update being processed here will not be replayed for you when you call
- /// ChannelManager::channel_monitor_updated, so you must store the update itself along
- /// with the persisted ChannelMonitor on your own local disk prior to returning a
- /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
- /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
- /// reload-time.
- ///
- /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
- /// remote location (with local copies persisted immediately), it is anticipated that all
- /// updates will return TemporaryFailure until the remote copies could be updated.
- TemporaryFailure,
- /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
- /// different watchtower and cannot update with all watchtowers that were previously informed
- /// of this channel).
- ///
- /// At reception of this error, ChannelManager will force-close the channel and return at
- /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
- /// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel
- /// update must be rejected.
- ///
- /// This failure may also signal a failure to update the local persisted copy of one of
- /// the channel monitor instance.
- ///
- /// Note that even when you fail a holder commitment transaction update, you must store the
- /// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor
- /// broadcasts it (e.g distributed channel-monitor deployment)
- ///
- /// In case of distributed watchtowers deployment, the new version must be written to disk, as
- /// state may have been stored but rejected due to a block forcing a commitment broadcast. This
- /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
- /// lagging behind on block processing.
- PermanentFailure,
-}
-
-/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
-/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
-/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
-/// corrupted.
-/// Contains a developer-readable error message.
-#[derive(Clone, Debug)]
-pub struct MonitorUpdateError(pub &'static str);
-
/// An event to be processed by the ChannelManager.
#[derive(Clone, PartialEq)]
pub enum MonitorEvent {
 /// A monitor event containing an HTLCUpdate.
 HTLCEvent(HTLCUpdate),
- /// A monitor event that the Channel's commitment transaction was broadcasted.
- CommitmentTxBroadcasted(OutPoint),
+ /// A monitor event that the Channel's commitment transaction was confirmed.
+ CommitmentTxConfirmed(OutPoint),
+
+ /// Indicates a [`ChannelMonitor`] update has completed. See
+ /// [`ChannelMonitorUpdateErr::TemporaryFailure`] for more information on how this is used.
+ ///
+ /// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
+ UpdateCompleted {
+ /// The funding outpoint of the [`ChannelMonitor`] that was updated
+ funding_txo: OutPoint,
+ /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
+ /// [`ChannelMonitor::get_latest_update_id`].
+ ///
+ /// Note that this should only be set to a given update's ID if all previous updates for the
+ /// same [`ChannelMonitor`] have been applied and persisted.
+ monitor_update_id: u64,
+ },
+
+ /// Indicates a [`ChannelMonitor`] update has failed. See
+ /// [`ChannelMonitorUpdateErr::PermanentFailure`] for more information on how this is used.
+ ///
+ /// The contained [`OutPoint`] is the funding outpoint of the channel whose monitor update
+ /// failed.
+ ///
+ /// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+ UpdateFailed(OutPoint),
}
+impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
+ // Note that UpdateCompleted and UpdateFailed are currently never serialized to disk as they are
+ // generated only in ChainMonitor
+ (0, UpdateCompleted) => {
+ (0, funding_txo, required),
+ (2, monitor_update_id, required),
+ },
+;
+ // Variants listed after the `;` carry a single unnamed field (no TLV struct body); the even
+ // type numbers (2/4/6) identify each variant on the wire.
+ (2, HTLCEvent),
+ (4, CommitmentTxConfirmed),
+ (6, UpdateFailed),
+);
/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
/// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
/// with at worst this delay, so we are not only using this value as a mercy for them but also
/// us as a safeguard to delay with enough time.
pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
-/// Number of blocks we wait on seeing a HTLC output being solved before we fail corresponding inbound
-/// HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us losing money.
+/// Number of blocks we wait on seeing a HTLC output being solved before we fail corresponding
+/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us
+/// losing money.
+///
+/// Note that this is a library-wide security assumption. If a reorg deeper than this number of
+/// blocks occurs, counterparties may be able to steal funds or claims made by and balances exposed
+/// by a [`ChannelMonitor`] may be incorrect.
// We also use this delay to be sure we can remove our in-flight claim txn from bump candidates buffer.
// It may cause spurious generation of bumped claim txn but that's alright given the outpoint is already
// solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not
/// fail this HTLC,
/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
/// condition with the above), we will fail this HTLC without telling the user we received it,
-/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
-/// that HTLC expires within this many blocks, we will simply fail the HTLC instead.
///
/// (1) is all about protecting us - we need enough time to update the channel state before we hit
/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
/// in a race condition between the user connecting a block (which would fail it) and the user
/// providing us the preimage (which would claim it).
-///
-/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
-/// end up force-closing the channel on us to claim it.
pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
// TODO(devrandom) replace this with HolderCommitmentTransaction
},
);
+/// Details about the balance(s) available for spending once the channel appears on chain.
+///
+/// See [`ChannelMonitor::get_claimable_balances`] for more details on when these will or will not
+/// be provided.
+#[derive(Clone, Debug, PartialEq, Eq)]
+// PartialOrd/Ord are derived only under cfg(test) and are not part of the public API.
+#[cfg_attr(test, derive(PartialOrd, Ord))]
+pub enum Balance {
+ /// The channel is not yet closed (or the commitment or closing transaction has not yet
+ /// appeared in a block). The given balance is claimable (less on-chain fees) if the channel is
+ /// force-closed now.
+ ClaimableOnChannelClose {
+ /// The amount available to claim, in satoshis, excluding the on-chain fees which will be
+ /// required to do so.
+ claimable_amount_satoshis: u64,
+ },
+ /// The channel has been closed, and the given balance is ours but awaiting confirmations until
+ /// we consider it spendable.
+ ClaimableAwaitingConfirmations {
+ /// The amount available to claim, in satoshis, possibly excluding the on-chain fees which
+ /// were spent in broadcasting the transaction.
+ claimable_amount_satoshis: u64,
+ /// The height at which an [`Event::SpendableOutputs`] event will be generated for this
+ /// amount.
+ confirmation_height: u32,
+ },
+ /// The channel has been closed, and the given balance should be ours but awaiting spending
+ /// transaction confirmation. If the spending transaction does not confirm in time, it is
+ /// possible our counterparty can take the funds by broadcasting an HTLC timeout on-chain.
+ ///
+ /// Once the spending transaction confirms, before it has reached enough confirmations to be
+ /// considered safe from chain reorganizations, the balance will instead be provided via
+ /// [`Balance::ClaimableAwaitingConfirmations`].
+ ContentiousClaimable {
+ /// The amount available to claim, in satoshis, excluding the on-chain fees which will be
+ /// required to do so.
+ claimable_amount_satoshis: u64,
+ /// The height at which the counterparty may be able to claim the balance if we have not
+ /// done so.
+ timeout_height: u32,
+ },
+ /// HTLCs which we sent to our counterparty which are claimable after a timeout (less on-chain
+ /// fees) if the counterparty does not know the preimage for the HTLCs. These are somewhat
+ /// likely to be claimed by our counterparty before we do.
+ MaybeClaimableHTLCAwaitingTimeout {
+ /// The amount available to claim, in satoshis, excluding the on-chain fees which will be
+ /// required to do so.
+ claimable_amount_satoshis: u64,
+ /// The height at which we will be able to claim the balance if our counterparty has not
+ /// done so.
+ claimable_height: u32,
+ },
+}
+
/// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
#[derive(PartialEq)]
struct IrrevocablyResolvedHTLC {
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
+ // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated
+ // during chain data processing. This prevents a race in `ChainMonitor::update_channel` (and
+ // presumably user implementations thereof as well) where we update the in-memory channel
+ // object, then before the persistence finishes (as it's all under a read-lock), we return
+ // pending events to the user or to the relevant `ChannelManager`. Then, on reload, we'll have
+ // the pre-event state here, but have processed the event in the `ChannelManager`.
+ // Note that because the `event_lock` in `ChainMonitor` is only taken in
+ // block/transaction-connected events and *not* during block/transaction-disconnected events,
+ // we further MUST NOT generate events during block/transaction-disconnection.
pending_monitor_events: Vec<MonitorEvent>,
+
pending_events: Vec<Event>,
// Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on
// remote monitor out-of-order with regards to the block view.
holder_tx_signed: bool,
+ // If a spend of the funding output is seen, we set this to true and reject any further
+ // updates. This prevents any further changes in the offchain state no matter the order
+ // of block connection between ChannelMonitors and the ChannelManager.
+ funding_spend_seen: bool,
+
funding_spend_confirmed: Option<Txid>,
/// The set of HTLCs which have been either claimed or failed on chain and have reached
/// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the
self.outputs_to_watch != other.outputs_to_watch ||
self.lockdown_from_offchain != other.lockdown_from_offchain ||
self.holder_tx_signed != other.holder_tx_signed ||
+ self.funding_spend_seen != other.funding_spend_seen ||
self.funding_spend_confirmed != other.funding_spend_confirmed ||
self.htlcs_resolved_on_chain != other.htlcs_resolved_on_chain
{
writer.write_all(&payment_preimage.0[..])?;
}
- writer.write_all(&byte_utils::be64_to_array(self.pending_monitor_events.len() as u64))?;
+ writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
+ MonitorEvent::HTLCEvent(_) => true,
+ MonitorEvent::CommitmentTxConfirmed(_) => true,
+ _ => false,
+ }).count() as u64).to_be_bytes())?;
for event in self.pending_monitor_events.iter() {
match event {
MonitorEvent::HTLCEvent(upd) => {
0u8.write(writer)?;
upd.write(writer)?;
},
- MonitorEvent::CommitmentTxBroadcasted(_) => 1u8.write(writer)?
+ MonitorEvent::CommitmentTxConfirmed(_) => 1u8.write(writer)?,
+ _ => {}, // Covered in the TLV writes below
}
}
write_tlv_fields!(writer, {
(1, self.funding_spend_confirmed, option),
(3, self.htlcs_resolved_on_chain, vec_type),
+ (5, self.pending_monitor_events, vec_type),
+ (7, self.funding_spend_seen, required),
});
Ok(())
lockdown_from_offchain: false,
holder_tx_signed: false,
+ funding_spend_seen: false,
funding_spend_confirmed: None,
htlcs_resolved_on_chain: Vec::new(),
}
#[cfg(test)]
+ // Test-only passthrough to the inner monitor's provide_secret; the error type is now a plain
+ // &'static str message (MonitorUpdateError was removed from this file).
- fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
+ fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> {
 self.inner.lock().unwrap().provide_secret(idx, secret)
 }
#[cfg(test)]
 fn provide_latest_holder_commitment_tx(
- &self,
- holder_commitment_tx: HolderCommitmentTransaction,
+ &self, holder_commitment_tx: HolderCommitmentTransaction,
 htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
- ) -> Result<(), MonitorUpdateError> {
- self.inner.lock().unwrap().provide_latest_holder_commitment_tx(
- holder_commitment_tx, htlc_outputs)
+ ) -> Result<(), ()> {
+ // Discard the inner &'static str error message; test callers only check success/failure.
+ self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs).map_err(|_| ())
 }
#[cfg(test)]
broadcaster: &B,
fee_estimator: &F,
logger: &L,
- ) -> Result<(), MonitorUpdateError>
+ ) -> Result<(), ()>
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
pub fn current_best_block(&self) -> BestBlock {
self.inner.lock().unwrap().best_block.clone()
}
+
+ /// Gets the balances in this channel which are either claimable by us if we were to
+ /// force-close the channel now or which are claimable on-chain (possibly awaiting
+ /// confirmation).
+ ///
+ /// Any balances in the channel which are available on-chain (excluding on-chain fees) are
+ /// included here until an [`Event::SpendableOutputs`] event has been generated for the
+ /// balance, or until our counterparty has claimed the balance and accrued several
+ /// confirmations on the claim transaction.
+ ///
+ /// Note that the balances available when you or your counterparty have broadcasted revoked
+ /// state(s) may not be fully captured here.
+ // TODO, fix that ^
+ ///
+ /// See [`Balance`] for additional details on the types of claimable balances which
+ /// may be returned here and their meanings.
+ pub fn get_claimable_balances(&self) -> Vec<Balance> {
+ let mut res = Vec::new();
+ let us = self.inner.lock().unwrap();
+
+ // A funding spend is "confirmed" for our purposes either once it has passed the
+ // anti-reorg delay (funding_spend_confirmed) or while it is still awaiting that
+ // threshold (in which case we also track its confirmation height).
+ let mut confirmed_txid = us.funding_spend_confirmed;
+ let mut pending_commitment_tx_conf_thresh = None;
+ let funding_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+ if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
+ Some((event.txid, event.confirmation_threshold()))
+ } else { None }
+ });
+ if let Some((txid, conf_thresh)) = funding_spend_pending {
+ debug_assert!(us.funding_spend_confirmed.is_none(),
+ "We have a pending funding spend awaiting anti-reorg confirmation, we can't have confirmed it already!");
+ confirmed_txid = Some(txid);
+ pending_commitment_tx_conf_thresh = Some(conf_thresh);
+ }
+
+ // Walks a set of HTLC outputs, pushing a `Balance` for each one we may still claim.
+ // $holder_commitment is true when walking a holder commitment transaction's HTLCs, in
+ // which case `htlc.offered == true` means the HTLC is outbound (claimable by us only via
+ // timeout); on a counterparty transaction the `offered` flag is inverted.
+ macro_rules! walk_htlcs {
+ ($holder_commitment: expr, $htlc_iter: expr) => {
+ for htlc in $htlc_iter {
+ if let Some(htlc_input_idx) = htlc.transaction_output_index {
+ if us.htlcs_resolved_on_chain.iter().any(|v| v.input_idx == htlc_input_idx) {
+ assert!(us.funding_spend_confirmed.is_some());
+ } else if htlc.offered == $holder_commitment {
+ // If the payment was outbound, check if there's an HTLCUpdate
+ // indicating we have spent this HTLC with a timeout, claiming it back
+ // and awaiting confirmations on it.
+ let htlc_update_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+ if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event {
+ if input_idx == htlc_input_idx { Some(event.confirmation_threshold()) } else { None }
+ } else { None }
+ });
+ if let Some(conf_thresh) = htlc_update_pending {
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ confirmation_height: conf_thresh,
+ });
+ } else {
+ res.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ claimable_height: htlc.cltv_expiry,
+ });
+ }
+ } else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
+ // Otherwise (the payment was inbound), only expose it as claimable if
+ // we know the preimage.
+ // Note that if there is a pending claim, but it did not use the
+ // preimage, we lost funds to our counterparty! We will then continue
+ // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
+ let htlc_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+ if let OnchainEvent::HTLCSpendConfirmation { input_idx, preimage, .. } = event.event {
+ if input_idx == htlc_input_idx {
+ Some((event.confirmation_threshold(), preimage.is_some()))
+ } else { None }
+ } else { None }
+ });
+ if let Some((conf_thresh, true)) = htlc_spend_pending {
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ confirmation_height: conf_thresh,
+ });
+ } else {
+ res.push(Balance::ContentiousClaimable {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ timeout_height: htlc.cltv_expiry,
+ });
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if let Some(txid) = confirmed_txid {
+ // The channel has closed on-chain: match the confirmed spend against the known
+ // commitment transactions, falling back to treating it as a cooperative close.
+ let mut found_commitment_tx = false;
+ if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
+ walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().map(|(a, _)| a));
+ if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
+ if let Some(value) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+ if let OnchainEvent::MaturingOutput {
+ descriptor: SpendableOutputDescriptor::StaticPaymentOutput(descriptor)
+ } = &event.event {
+ Some(descriptor.output.value)
+ } else { None }
+ }) {
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: value,
+ confirmation_height: conf_thresh,
+ });
+ } else {
+ // If a counterparty commitment transaction is awaiting confirmation, we
+ // should either have a StaticPaymentOutput MaturingOutput event awaiting
+ // confirmation with the same height or have never met our dust amount.
+ }
+ }
+ found_commitment_tx = true;
+ } else if txid == us.current_holder_commitment_tx.txid {
+ walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, _)| a));
+ if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: us.current_holder_commitment_tx.to_self_value_sat,
+ confirmation_height: conf_thresh,
+ });
+ }
+ found_commitment_tx = true;
+ } else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
+ if txid == prev_commitment.txid {
+ walk_htlcs!(true, prev_commitment.htlc_outputs.iter().map(|(a, _, _)| a));
+ if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: prev_commitment.to_self_value_sat,
+ confirmation_height: conf_thresh,
+ });
+ }
+ found_commitment_tx = true;
+ }
+ }
+ if !found_commitment_tx {
+ if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
+ // We blindly assume this is a cooperative close transaction here, and that
+ // neither us nor our counterparty misbehaved. At worst we've under-estimated
+ // the amount we can claim as we'll punish a misbehaving counterparty.
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: us.current_holder_commitment_tx.to_self_value_sat,
+ confirmation_height: conf_thresh,
+ });
+ }
+ }
+ // TODO: Add logic to provide claimable balances for counterparty broadcasting revoked
+ // outputs.
+ } else {
+ // The channel is still open: report the balance we'd get on a force-close now, based
+ // on the current holder commitment transaction.
+ let mut claimable_inbound_htlc_value_sat = 0;
+ for (htlc, _, _) in us.current_holder_commitment_tx.htlc_outputs.iter() {
+ if htlc.transaction_output_index.is_none() { continue; }
+ if htlc.offered {
+ res.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ claimable_height: htlc.cltv_expiry,
+ });
+ } else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
+ claimable_inbound_htlc_value_sat += htlc.amount_msat / 1000;
+ }
+ }
+ res.push(Balance::ClaimableOnChannelClose {
+ claimable_amount_satoshis: us.current_holder_commitment_tx.to_self_value_sat + claimable_inbound_htlc_value_sat,
+ });
+ }
+
+ res
+ }
+
+ /// Gets the set of outbound HTLCs which are pending resolution in this channel.
+ /// This is used to reconstruct pending outbound payments on restart in the ChannelManager.
+ pub(crate) fn get_pending_outbound_htlcs(&self) -> HashMap<HTLCSource, HTLCOutputInCommitment> {
+ let mut res = HashMap::new();
+ let us = self.inner.lock().unwrap();
+
+ // Walks (HTLC, source) pairs, inserting each still-pending outbound HTLC into `res`.
+ // $holder_commitment is true when walking a holder commitment transaction's HTLCs, in
+ // which case `htlc.offered == true` means the HTLC is outbound.
+ macro_rules! walk_htlcs {
+ ($holder_commitment: expr, $htlc_iter: expr) => {
+ for (htlc, source) in $htlc_iter {
+ if us.htlcs_resolved_on_chain.iter().any(|v| Some(v.input_idx) == htlc.transaction_output_index) {
+ // We should assert that funding_spend_confirmed is_some() here, but we
+ // have some unit tests which violate HTLC transaction CSVs entirely and
+ // would fail.
+ // TODO: Once tests all connect transactions at consensus-valid times, we
+ // should assert here like we do in `get_claimable_balances`.
+ } else if htlc.offered == $holder_commitment {
+ // If the payment was outbound, check if there's an HTLCUpdate
+ // indicating we have spent this HTLC with a timeout, claiming it back
+ // and awaiting confirmations on it.
+ let htlc_update_confd = us.onchain_events_awaiting_threshold_conf.iter().any(|event| {
+ if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event {
+ // If the HTLC was timed out, we wait for ANTI_REORG_DELAY blocks
+ // before considering it "no longer pending" - this matches when we
+ // provide the ChannelManager an HTLC failure event.
+ Some(input_idx) == htlc.transaction_output_index &&
+ us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1
+ } else if let OnchainEvent::HTLCSpendConfirmation { input_idx, .. } = event.event {
+ // If the HTLC was fulfilled with a preimage, we consider the HTLC
+ // immediately non-pending, matching when we provide ChannelManager
+ // the preimage.
+ Some(input_idx) == htlc.transaction_output_index
+ } else { false }
+ });
+ if !htlc_update_confd {
+ res.insert(source.clone(), htlc.clone());
+ }
+ }
+ }
+ }
+ }
+
+ // We're only concerned with the confirmation count of HTLC transactions, and don't
+ // actually care how many confirmations a commitment transaction may or may not have. Thus,
+ // we look for either a FundingSpendConfirmation event or a funding_spend_confirmed.
+ let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
+ us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+ if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
+ Some(event.txid)
+ } else { None }
+ })
+ });
+ if let Some(txid) = confirmed_txid {
+ // Only HTLCs with a source (ie which we originated or forwarded) are relevant here.
+ if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
+ walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().filter_map(|(a, b)| {
+ if let &Some(ref source) = b {
+ Some((a, &**source))
+ } else { None }
+ }));
+ } else if txid == us.current_holder_commitment_tx.txid {
+ walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().filter_map(|(a, _, c)| {
+ if let Some(source) = c { Some((a, source)) } else { None }
+ }));
+ } else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
+ if txid == prev_commitment.txid {
+ walk_htlcs!(true, prev_commitment.htlc_outputs.iter().filter_map(|(a, _, c)| {
+ if let Some(source) = c { Some((a, source)) } else { None }
+ }));
+ }
+ }
+ } else {
+ // If we have not seen a commitment transaction on-chain (ie the channel is not yet
+ // closed), just examine the available counterparty commitment transactions. See docs
+ // on `fail_unbroadcast_htlcs`, below, for justification.
+ macro_rules! walk_counterparty_commitment {
+ ($txid: expr) => {
+ if let Some(ref latest_outpoints) = us.counterparty_claimable_outpoints.get($txid) {
+ for &(ref htlc, ref source_option) in latest_outpoints.iter() {
+ if let &Some(ref source) = source_option {
+ res.insert((**source).clone(), htlc.clone());
+ }
+ }
+ }
+ }
+ }
+ if let Some(ref txid) = us.current_counterparty_commitment_txid {
+ walk_counterparty_commitment!(txid);
+ }
+ if let Some(ref txid) = us.prev_counterparty_commitment_txid {
+ walk_counterparty_commitment!(txid);
+ }
+ }
+
+ res
+ }
}
/// Compares a broadcasted commitment transaction's HTLCs with those in the latest state,
/// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither
/// needed by holder commitment transactions HTCLs nor by counterparty ones. Unless we haven't already seen
/// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key).
- fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
+ fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> {
if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) {
- return Err(MonitorUpdateError("Previous secret did not match new one"));
+ return Err("Previous secret did not match new one");
}
// Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill
/// is important that any clones of this channel monitor (including remote clones) by kept
/// up-to-date as our holder commitment transaction is updated.
/// Panics if set_on_holder_tx_csv has never been called.
- fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
+ fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), &'static str> {
// block for Rust 1.34 compat
let mut new_holder_commitment_tx = {
let trusted_tx = holder_commitment_tx.trust();
mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
if self.holder_tx_signed {
- return Err(MonitorUpdateError("Latest holder commitment signed has already been signed, update is rejected"));
+ return Err("Latest holder commitment signed has already been signed, update is rejected");
}
Ok(())
}
log_info!(logger, "Broadcasting local {}", log_tx!(tx));
broadcaster.broadcast_transaction(tx);
}
- self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
+ self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
}
- pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), MonitorUpdateError>
+ pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
} else if self.latest_update_id + 1 != updates.update_id {
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
}
+ let mut ret = Ok(());
for update in updates.updates.iter() {
match update {
ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info");
if self.lockdown_from_offchain { panic!(); }
- self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone())?
+ if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone()) {
+ log_error!(logger, "Providing latest holder commitment transaction failed/was refused:");
+ log_error!(logger, " {}", e);
+ ret = Err(());
+ }
}
ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_revocation_point } => {
log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
},
ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
log_trace!(logger, "Updating ChannelMonitor with commitment secret");
- self.provide_secret(*idx, *secret)?
+ if let Err(e) = self.provide_secret(*idx, *secret) {
+ log_error!(logger, "Providing latest counterparty commitment secret failed/was refused:");
+ log_error!(logger, " {}", e);
+ ret = Err(());
+ }
},
ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast);
} else if !self.holder_tx_signed {
log_error!(logger, "You have a toxic holder commitment transaction avaible in channel monitor, read comment in ChannelMonitor::get_latest_holder_commitment_txn to be informed of manual action to take");
} else {
- // If we generated a MonitorEvent::CommitmentTxBroadcasted, the ChannelManager
+ // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager
// will still give us a ChannelForceClosed event with !should_broadcast, but we
// shouldn't print the scary warning above.
log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction.");
}
}
self.latest_update_id = updates.update_id;
- Ok(())
+
+ if ret.is_ok() && self.funding_spend_seen {
+ log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
+ Err(())
+ } else { ret }
}
pub fn get_latest_update_id(&self) -> u64 {
let prevout = &tx.input[0].previous_output;
if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
let mut balance_spendable_csv = None;
- log_info!(logger, "Channel closed by funding output spend in txid {}.", log_bytes!(tx.txid()));
+ log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
+ log_bytes!(self.funding_info.0.to_channel_id()), tx.txid());
+ self.funding_spend_seen = true;
if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
let (mut new_outpoints, new_outputs) = self.check_spend_counterparty_transaction(&tx, height, &logger);
if !new_outputs.1.is_empty() {
let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone());
let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
claimable_outpoints.push(commitment_package);
- self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
+ self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
self.holder_tx_signed = true;
// Because we're broadcasting a commitment transaction, we should construct the package
let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
|| (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && input.witness[1].len() == 33);
let accepted_preimage_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::AcceptedHTLC);
+ #[cfg(not(fuzzing))]
let accepted_timeout_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && !revocation_sig_claim;
let offered_preimage_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && !revocation_sig_claim;
+ #[cfg(not(fuzzing))]
let offered_timeout_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::OfferedHTLC);
let mut payment_preimage = PaymentPreimage([0; 32]);
// resolve the source HTLC with the original sender.
payment_data = Some(((*source).clone(), htlc_output.payment_hash, htlc_output.amount_msat));
} else if !$holder_tx {
- check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
+ check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
if payment_data.is_none() {
check_htlc_valid_counterparty!(self.prev_counterparty_commitment_txid, htlc_output);
}
// we've already failed the HTLC as the commitment transaction
// which was broadcasted was revoked. In that case, we should
// spend the HTLC output here immediately, and expose that fact
- // as a ClaimableBalance, something which we do not yet do.
+ // as a Balance, something which we do not yet do.
// TODO: Track the above as claimable!
}
continue 'outer_loop;
}
}
-/// `Persist` defines behavior for persisting channel monitors: this could mean
-/// writing once to disk, and/or uploading to one or more backup services.
-///
-/// Note that for every new monitor, you **must** persist the new `ChannelMonitor`
-/// to disk/backups. And, on every update, you **must** persist either the
-/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, there is risk
-/// of situations such as revoking a transaction, then crashing before this
-/// revocation can be persisted, then unintentionally broadcasting a revoked
-/// transaction and losing money. This is a risk because previous channel states
-/// are toxic, so it's important that whatever channel state is persisted is
-/// kept up-to-date.
-pub trait Persist<ChannelSigner: Sign> {
- /// Persist a new channel's data. The data can be stored any way you want, but
- /// the identifier provided by Rust-Lightning is the channel's outpoint (and
- /// it is up to you to maintain a correct mapping between the outpoint and the
- /// stored channel data). Note that you **must** persist every new monitor to
- /// disk. See the `Persist` trait documentation for more details.
- ///
- /// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`,
- /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
- fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
-
- /// Update one channel's data. The provided `ChannelMonitor` has already
- /// applied the given update.
- ///
- /// Note that on every update, you **must** persist either the
- /// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See
- /// the `Persist` trait documentation for more details.
- ///
- /// If an implementer chooses to persist the updates only, they need to make
- /// sure that all the updates are applied to the `ChannelMonitors` *before*
- /// the set of channel monitors is given to the `ChannelManager`
- /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
- /// applying a monitor update to a monitor. If full `ChannelMonitors` are
- /// persisted, then there is no need to persist individual updates.
- ///
- /// Note that there could be a performance tradeoff between persisting complete
- /// channel monitors on every update vs. persisting only updates and applying
- /// them in batches. The size of each monitor grows `O(number of state updates)`
- /// whereas updates are small and `O(1)`.
- ///
- /// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`,
- /// [`ChannelMonitorUpdate::write`] for writing out an update, and
- /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
- fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
-}
-
impl<Signer: Sign, T: Deref, F: Deref, L: Deref> chain::Listen for (ChannelMonitor<Signer>, T, F, L)
where
T::Target: BroadcasterInterface,
}
let pending_monitor_events_len: u64 = Readable::read(reader)?;
- let mut pending_monitor_events = Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)));
+ let mut pending_monitor_events = Some(
+ Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3))));
for _ in 0..pending_monitor_events_len {
let ev = match <u8 as Readable>::read(reader)? {
0 => MonitorEvent::HTLCEvent(Readable::read(reader)?),
- 1 => MonitorEvent::CommitmentTxBroadcasted(funding_info.0),
+ 1 => MonitorEvent::CommitmentTxConfirmed(funding_info.0),
_ => return Err(DecodeError::InvalidValue)
};
- pending_monitor_events.push(ev);
+ pending_monitor_events.as_mut().unwrap().push(ev);
}
let pending_events_len: u64 = Readable::read(reader)?;
let mut funding_spend_confirmed = None;
let mut htlcs_resolved_on_chain = Some(Vec::new());
+ let mut funding_spend_seen = Some(false);
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, vec_type),
+ (5, pending_monitor_events, vec_type),
+ (7, funding_spend_seen, option),
});
let mut secp_ctx = Secp256k1::new();
current_holder_commitment_number,
payment_preimages,
- pending_monitor_events,
+ pending_monitor_events: pending_monitor_events.unwrap(),
pending_events,
onchain_events_awaiting_threshold_conf,
lockdown_from_offchain,
holder_tx_signed,
+ funding_spend_seen: funding_spend_seen.unwrap(),
funding_spend_confirmed,
htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
#[cfg(test)]
mod tests {
+ use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::hex::FromHex;
- use bitcoin::hash_types::Txid;
+ use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::network::constants::Network;
+ use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+ use bitcoin::secp256k1::Secp256k1;
+
use hex;
- use chain::BestBlock;
+
+ use super::ChannelMonitorUpdateStep;
+ use ::{check_added_monitors, check_closed_broadcast, check_closed_event, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err};
+ use chain::{BestBlock, Confirm};
use chain::channelmonitor::ChannelMonitor;
use chain::package::{WEIGHT_OFFERED_HTLC, WEIGHT_RECEIVED_HTLC, WEIGHT_REVOKED_OFFERED_HTLC, WEIGHT_REVOKED_RECEIVED_HTLC, WEIGHT_REVOKED_OUTPUT};
use chain::transaction::OutPoint;
+ use chain::keysinterface::InMemorySigner;
use ln::{PaymentPreimage, PaymentHash};
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
+ use ln::channelmanager::PaymentSendFailure;
+ use ln::features::InitFeatures;
+ use ln::functional_test_utils::*;
use ln::script::ShutdownScript;
+ use util::errors::APIError;
+ use util::events::{ClosureReason, MessageSendEventsProvider};
use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
- use bitcoin::secp256k1::key::{SecretKey,PublicKey};
- use bitcoin::secp256k1::Secp256k1;
+ use util::ser::{ReadableArgs, Writeable};
use sync::{Arc, Mutex};
- use chain::keysinterface::InMemorySigner;
+ use io;
use prelude::*;
+ fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
+ // Previously, monitor updates were allowed freely even after a funding-spend transaction
+ // confirmed. This would allow a race condition where we could receive a payment (including
+ // the counterparty revoking their broadcasted state!) and accept it without recourse as
+ // long as the ChannelMonitor receives the block first, the full commitment update dance
+ // occurs after the block is connected, and before the ChannelManager receives the block.
+ // Obviously this is an incredibly contrived race given the counterparty would be risking
+ // their full channel balance for it, but it's worth fixing nonetheless as it makes the
+ // potential ChannelMonitor states simpler to reason about.
+ //
+ // This test checks said behavior, as well as ensuring a ChannelMonitorUpdate with multiple
+ // updates is handled correctly in such conditions.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let channel = create_announced_chan_between_nodes(
+ &nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(
+ &nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+ // Rebalance somewhat
+ send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
+
+ // First route two payments for testing at the end
+ let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000).0;
+ let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000).0;
+
+ // Grab node B's own commitment transaction as well as node A's (the counterparty's view,
+ // which also carries its two HTLC-Timeout transactions).
+ let local_txn = get_local_commitment_txn!(nodes[1], channel.2);
+ assert_eq!(local_txn.len(), 1);
+ let remote_txn = get_local_commitment_txn!(nodes[0], channel.2);
+ assert_eq!(remote_txn.len(), 3); // Commitment and two HTLC-Timeouts
+ check_spends!(remote_txn[1], remote_txn[0]);
+ check_spends!(remote_txn[2], remote_txn[0]);
+ // Select which commitment transaction to confirm, per the test parameter.
+ let broadcast_tx = if use_local_txn { &local_txn[0] } else { &remote_txn[0] };
+
+ // Connect a commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
+ // channel is now closed, but the ChannelManager doesn't know that yet.
+ let new_header = BlockHeader {
+ version: 2, time: 0, bits: 0, nonce: 0,
+ prev_blockhash: nodes[0].best_block_info().0,
+ merkle_root: Default::default() };
+ let conf_height = nodes[0].best_block_info().1 + 1;
+ nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
+ &[(0, broadcast_tx)], conf_height);
+
+ // Snapshot the monitor (via a serialize/deserialize round-trip) as it stands right after
+ // the funding spend confirmed, so we can replay a multi-step update against it below.
+ let (_, pre_update_monitor) = <(BlockHash, ChannelMonitor<InMemorySigner>)>::read(
+ &mut io::Cursor::new(&get_monitor!(nodes[1], channel.2).encode()),
+ &nodes[1].keys_manager.backing).unwrap();
+
+ // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
+ // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
+ let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
+ unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)),
+ true, APIError::ChannelUnavailable { ref err },
+ assert!(err.contains("ChannelMonitor storage failure")));
+ check_added_monitors!(nodes[1], 2); // After the failure we generate a close-channel monitor update
+ check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+
+ // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update
+ // and provides the claim preimages for the two pending HTLCs. The first update generates
+ // an error, but the point of this test is to ensure the later updates are still applied.
+ let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
+ let mut replay_update = monitor_updates.get(&channel.2).unwrap().iter().rev().skip(1).next().unwrap().clone();
+ assert_eq!(replay_update.updates.len(), 1);
+ if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } = replay_update.updates[0] {
+ } else { panic!(); }
+ replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 });
+ replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 });
+
+ // Use a fresh broadcaster so we only observe transactions generated by this replay.
+ let broadcaster = TestBroadcaster::new(Arc::clone(&nodes[1].blocks));
+ assert!(
+ pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
+ .is_err());
+ // Even though we error'd on the first update, we should still have generated an HTLC claim
+ // transaction
+ let txn_broadcasted = broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert!(txn_broadcasted.len() >= 2);
+ let htlc_txn = txn_broadcasted.iter().filter(|tx| {
+ assert_eq!(tx.input.len(), 1);
+ tx.input[0].previous_output.txid == broadcast_tx.txid()
+ }).collect::<Vec<_>>();
+ assert_eq!(htlc_txn.len(), 2);
+ check_spends!(htlc_txn[0], broadcast_tx);
+ check_spends!(htlc_txn[1], broadcast_tx);
+ }
+ #[test]
+ fn test_funding_spend_refuses_updates() {
+ // Exercise both cases: our own (holder) commitment transaction confirming, and the
+ // counterparty's commitment transaction confirming.
+ do_test_funding_spend_refuses_updates(true);
+ do_test_funding_spend_refuses_updates(false);
+ }
+
#[test]
fn test_prune_preimages() {
let secp_ctx = Secp256k1::new();
selected_contest_delay: 67,
}),
funding_outpoint: Some(funding_outpoint),
+ opt_anchors: None,
};
// Prune with one old state and a holder commitment tx holding a few overlaps with the
// old state.
let mut sum_actual_sigs = 0;
macro_rules! sign_input {
- ($sighash_parts: expr, $idx: expr, $amount: expr, $weight: expr, $sum_actual_sigs: expr) => {
+ ($sighash_parts: expr, $idx: expr, $amount: expr, $weight: expr, $sum_actual_sigs: expr, $opt_anchors: expr) => {
let htlc = HTLCOutputInCommitment {
offered: if *$weight == WEIGHT_REVOKED_OFFERED_HTLC || *$weight == WEIGHT_OFFERED_HTLC { true } else { false },
amount_msat: 0,
payment_hash: PaymentHash([1; 32]),
transaction_output_index: Some($idx as u32),
};
- let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) };
+ let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, $opt_anchors, &pubkey, &pubkey, &pubkey) };
let sighash = hash_to_message!(&$sighash_parts.signature_hash($idx, &redeem_script, $amount, SigHashType::All)[..]);
let sig = secp_ctx.sign(&sighash, &privkey);
$sighash_parts.access_witness($idx).push(sig.serialize_der().to_vec());
{
let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
for (idx, inp) in inputs_weight.iter().enumerate() {
- sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+ sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, false);
inputs_total_weight += inp;
}
}
{
let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
for (idx, inp) in inputs_weight.iter().enumerate() {
- sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+ sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, false);
inputs_total_weight += inp;
}
}
{
let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
for (idx, inp) in inputs_weight.iter().enumerate() {
- sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+ sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, false);
inputs_total_weight += inp;
}
}