force_close_spend_delay: None,
is_outbound: true, is_channel_ready: true,
is_usable: true, is_public: true,
+ balance_msat: 0,
outbound_capacity_msat: capacity.saturating_mul(1000),
next_outbound_htlc_limit_msat: capacity.saturating_mul(1000),
next_outbound_htlc_minimum_msat: 0,
}
// Test that if the store's path to channel data is read-only, writing a
- // monitor to it results in the store returning an InProgress.
+ // monitor to it results in the store returning an UnrecoverableError.
// Windows ignores the read-only flag for folders, so this test is Unix-only.
#[cfg(not(target_os = "windows"))]
#[test]
let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
// Set the store's directory to read-only, which should result in
- // returning a permanent failure when we then attempt to persist a
+ // returning an unrecoverable failure when we then attempt to persist a
// channel update.
let path = &store.get_data_dir();
let mut perms = fs::metadata(path).unwrap().permissions();
/// If at some point no further progress can be made towards persisting the pending updates, the
/// node should simply shut down.
///
-/// * If the persistence has failed and cannot be retried further (e.g. because of some timeout),
+/// * If the persistence has failed and cannot be retried further (e.g. because of an outage),
/// [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
/// an immediate panic and future operations in LDK generally failing.
///
/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
///
/// If at some point no further progress can be made towards persisting a pending update, the node
-/// should simply shut down.
+/// should simply shut down. Until then, the background task should either loop indefinitely, or
+/// persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
+/// and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
+/// monitor updates may be marked completed).
///
/// # Using remote watchtowers
///
/// claims which are awaiting confirmation.
///
/// Includes the balances from each [`ChannelMonitor`] *except* those included in
- /// `ignored_channels`.
+ /// `ignored_channels`, allowing you to filter out balances from channels which are still open
+ /// (and whose balance should likely be pulled from the [`ChannelDetails`]).
///
/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
/// inclusion in the return value.
/// Returns the descriptors for relevant outputs (i.e., those that we can spend) within the
/// transaction if they exist and the transaction has at least [`ANTI_REORG_DELAY`]
+ /// confirmations. For [`SpendableOutputDescriptor::DelayedPaymentOutput`] descriptors to be
+ /// returned, the transaction must have at least `max(ANTI_REORG_DELAY, to_self_delay)`
/// confirmations.
///
/// Descriptors returned by this method are primarily exposed via [`Event::SpendableOutputs`]
/// missed/unhandled descriptors. For the purpose of gathering historical records, if the
/// channel close has fully resolved (i.e., [`ChannelMonitor::get_claimable_balances`] returns
/// an empty set), you can retrieve all spendable outputs by providing all descendant spending
- /// transactions starting from the channel's funding or closing transaction that have at least
- /// [`ANTI_REORG_DELAY`] confirmations.
+ /// transactions starting from the channel's funding transaction and going down three levels.
///
/// `tx` is a transaction we'll scan the outputs of. Any transaction can be provided. If any
/// outputs which can be spent by us are found, at least one descriptor is returned.
pub fn get_spendable_outputs(&self, tx: &Transaction, confirmation_height: u32) -> Vec<SpendableOutputDescriptor> {
let inner = self.inner.lock().unwrap();
let current_height = inner.best_block.height;
- if current_height.saturating_sub(ANTI_REORG_DELAY) + 1 >= confirmation_height {
- inner.get_spendable_outputs(tx)
- } else {
- Vec::new()
- }
+ let mut spendable_outputs = inner.get_spendable_outputs(tx);
+ spendable_outputs.retain(|descriptor| {
+ let mut conf_threshold = current_height.saturating_sub(ANTI_REORG_DELAY) + 1;
+ if let SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) = descriptor {
+ conf_threshold = cmp::min(conf_threshold,
+ current_height.saturating_sub(descriptor.to_self_delay as u32) + 1);
+ }
+ conf_threshold >= confirmation_height
+ });
+ spendable_outputs
}
}
{
self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
+ let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| {
+ self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event {
+ OnchainEvent::FundingSpendConfirmation { .. } => Some(event.txid),
+ _ => None,
+ })
+ });
+ let confirmed_spend_txid = if let Some(txid) = confirmed_spend_txid {
+ txid
+ } else {
+ return;
+ };
+
// If the channel is force closed, try to claim the output from this preimage.
// First check if a counterparty commitment transaction has been broadcasted:
macro_rules! claim_htlcs {
}
}
if let Some(txid) = self.current_counterparty_commitment_txid {
- if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
- claim_htlcs!(*commitment_number, txid);
+ if txid == confirmed_spend_txid {
+ if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
+ claim_htlcs!(*commitment_number, txid);
+ } else {
+ debug_assert!(false);
+ log_error!(logger, "Detected counterparty commitment tx on-chain without tracking commitment number");
+ }
return;
}
}
if let Some(txid) = self.prev_counterparty_commitment_txid {
- if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
- claim_htlcs!(*commitment_number, txid);
+ if txid == confirmed_spend_txid {
+ if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
+ claim_htlcs!(*commitment_number, txid);
+ } else {
+ debug_assert!(false);
+ log_error!(logger, "Detected counterparty commitment tx on-chain without tracking commitment number");
+ }
return;
}
}
// *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our
// holder commitment transactions.
if self.broadcasted_holder_revokable_script.is_some() {
- // Assume that the broadcasted commitment transaction confirmed in the current best
- // block. Even if not, its a reasonable metric for the bump criteria on the HTLC
- // transactions.
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
- self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
- if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, self.best_block.height());
+ let holder_commitment_tx = if self.current_holder_commitment_tx.txid == confirmed_spend_txid {
+ Some(&self.current_holder_commitment_tx)
+ } else if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
+ if prev_holder_commitment_tx.txid == confirmed_spend_txid {
+ Some(prev_holder_commitment_tx)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ if let Some(holder_commitment_tx) = holder_commitment_tx {
+ // Assume that the broadcasted commitment transaction confirmed in the current best
+ // block. Even if not, it's a reasonable metric for the bump criteria on the HTLC
+ // transactions.
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height());
self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
}
}
}
pub struct AvailableBalances {
+ /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
+ pub balance_msat: u64,
/// Total amount available for our counterparty to send to us.
pub inbound_capacity_msat: u64,
/// Total amount available for us to send to our counterparty.
let inbound_stats = context.get_inbound_pending_htlc_stats(None);
let outbound_stats = context.get_outbound_pending_htlc_stats(None);
+ let mut balance_msat = context.value_to_self_msat;
+ for ref htlc in context.pending_inbound_htlcs.iter() {
+ if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
+ balance_msat += htlc.amount_msat;
+ }
+ }
+ balance_msat -= outbound_stats.pending_htlcs_value_msat;
+
let outbound_capacity_msat = context.value_to_self_msat
.saturating_sub(outbound_stats.pending_htlcs_value_msat)
.saturating_sub(
outbound_capacity_msat,
next_outbound_htlc_limit_msat: available_capacity_msat,
next_outbound_htlc_minimum_msat,
+ balance_msat,
}
}
}
}
- pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
- self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
+ /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
+ /// happened.
+ pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
+ let new_forwarding_info = Some(CounterpartyForwardingInfo {
fee_base_msat: msg.contents.fee_base_msat,
fee_proportional_millionths: msg.contents.fee_proportional_millionths,
cltv_expiry_delta: msg.contents.cltv_expiry_delta
});
+ let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
+ if did_change {
+ self.context.counterparty_forwarding_info = new_forwarding_info;
+ }
- Ok(())
+ Ok(did_change)
}
/// Begins the shutdown process, getting a message for the remote peer and returning all
},
signature: Signature::from(unsafe { FFISignature::new() })
};
- node_a_chan.channel_update(&update).unwrap();
+ assert!(node_a_chan.channel_update(&update).unwrap());
// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
// change our official htlc_minimum_msat.
},
None => panic!("expected counterparty forwarding info to be Some")
}
+
+ assert!(!node_a_chan.channel_update(&update).unwrap());
}
#[cfg(feature = "_test_vectors")]
}
/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
-///
-/// Balances of a channel are available through [`ChainMonitor::get_claimable_balances`] and
-/// [`ChannelMonitor::get_claimable_balances`], calculated with respect to the corresponding on-chain
-/// transactions.
-///
-/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances
#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
///
/// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
pub feerate_sat_per_1000_weight: Option<u32>,
+ /// Our total balance. This is the amount we would get if we close the channel.
+ /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
+ /// amount is not likely to be recoverable on close.
+ ///
+ /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
+ /// balance is not available for inclusion in new outbound HTLCs). This further does not include
+ /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
+ /// This does not consider any on-chain fees.
+ ///
+ /// See also [`ChannelDetails::outbound_capacity_msat`]
+ pub balance_msat: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// outgoing HTLCs which are awaiting some other resolution to be sent.
///
+ /// See also [`ChannelDetails::balance_msat`]
+ ///
/// This value is not exact. Due to various in-flight changes, feerate changes, and our
/// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
/// should be able to spend nearly this amount.
/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
/// to use a limit as close as possible to the HTLC limit we can currently send.
///
- /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`] and
- /// [`ChannelDetails::outbound_capacity_msat`].
+ /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+ /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
pub next_outbound_htlc_limit_msat: u64,
/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
channel_value_satoshis: context.get_value_satoshis(),
feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
unspendable_punishment_reserve: to_self_reserve_satoshis,
+ balance_msat: balance.balance_msat,
inbound_capacity_msat: balance.inbound_capacity_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
/// In general, a path may raise:
/// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
/// node public key) is specified.
- /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
- /// (including due to previous monitor update failure or new permanent monitor update
- /// failure).
+ /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available as it has been
+ /// closed, doesn't exist, or the peer is currently disconnected.
/// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
- try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
+ let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
+ // If nothing changed after applying their update, we don't need to bother
+ // persisting.
+ if !did_change {
+ return Ok(NotifyOption::SkipPersistNoEvents);
+ }
}
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
(10, self.channel_value_satoshis, required),
(12, self.unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, self.next_outbound_htlc_limit_msat, required), // Forwards compatibility for removed balance_msat field.
+ (16, self.balance_msat, required),
(18, self.outbound_capacity_msat, required),
(19, self.next_outbound_htlc_limit_msat, required),
(20, self.inbound_capacity_msat, required),
(10, channel_value_satoshis, required),
(12, unspendable_punishment_reserve, option),
(14, user_channel_id_low, required),
- (16, _balance_msat, option), // Backwards compatibility for removed balance_msat field.
+ (16, balance_msat, required),
(18, outbound_capacity_msat, required),
// Note that by the time we get past the required read above, outbound_capacity_msat will be
// filled in, so we can safely unwrap it here.
let user_channel_id = user_channel_id_low as u128 +
((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
- let _balance_msat: Option<u64> = _balance_msat;
-
Ok(Self {
inbound_scid_alias,
channel_id: channel_id.0.unwrap(),
channel_value_satoshis: channel_value_satoshis.0.unwrap(),
unspendable_punishment_reserve,
user_channel_id,
+ balance_msat: balance_msat.0.unwrap(),
outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
// First confirm the commitment transaction on nodes[0], which should leave us with three
// claimable balances.
let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32;
- mine_transaction(&nodes[0], &as_txn[0]);
+ let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &as_txn[0]));
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
// Connect blocks until the commitment transaction's CSV expires, providing us the relevant
// `SpendableOutputs` event and removing the claimable balance entry.
- connect_blocks(&nodes[0], node_a_commitment_claimable - nodes[0].best_block_info().1);
+ connect_blocks(&nodes[0], node_a_commitment_claimable - nodes[0].best_block_info().1 - 1);
+ assert!(get_monitor!(nodes[0], chan_id)
+ .get_spendable_outputs(&as_txn[0], commitment_tx_conf_height_a).is_empty());
+ connect_blocks(&nodes[0], 1);
assert_eq!(vec![Balance::ClaimableAwaitingConfirmations {
amount_satoshis: 10_000,
confirmation_height: node_a_htlc_claimable,
}],
nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
- test_spendable_output(&nodes[0], &as_txn[0]);
+ let to_self_spendable_output = test_spendable_output(&nodes[0], &as_txn[0]);
+ assert_eq!(
+ get_monitor!(nodes[0], chan_id).get_spendable_outputs(&as_txn[0], commitment_tx_conf_height_a),
+ to_self_spendable_output
+ );
// Connect blocks until the HTLC-Timeout's CSV expires, providing us the relevant
// `SpendableOutputs` event and removing the claimable balance entry.
//! Further functional tests which test blockchain reorganizations.
+use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::OutPoint;
use crate::chain::Confirm;
-use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination, MessageSendEvent};
use crate::ln::msgs::{ChannelMessageHandler, Init};
use crate::util::test_utils;
use crate::util::ser::Writeable;
do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstReorgsOnlyTip);
do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
}
+
+#[test]
+fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reorg() {
+	// We detect a counterparty commitment confirm onchain, followed by a reorg and a confirmation
+	// of a holder commitment. Then, if we learn of the preimage for an HTLC in both commitments,
+	// test that we only claim the currently confirmed commitment.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	// Two nodes, so two (default) ChannelManager configs.
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	// Route an HTLC which we will claim onchain with the preimage.
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	// Force close with the latest counterparty commitment, confirm it, and reorg it with the latest
+	// holder commitment.
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+	check_closed_broadcast(&nodes[0], 1, true);
+	check_added_monitors(&nodes[0], 1);
+	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+	check_closed_broadcast(&nodes[1], 1, true);
+	check_added_monitors(&nodes[1], 1);
+	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+
+	let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+	assert_eq!(txn.len(), 1);
+	let commitment_tx_a = txn.pop().unwrap();
+	check_spends!(commitment_tx_a, funding_tx);
+
+	let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+	assert_eq!(txn.len(), 1);
+	let commitment_tx_b = txn.pop().unwrap();
+	check_spends!(commitment_tx_b, funding_tx);
+
+	mine_transaction(&nodes[0], &commitment_tx_a);
+	mine_transaction(&nodes[1], &commitment_tx_a);
+
+	disconnect_blocks(&nodes[0], 1);
+	disconnect_blocks(&nodes[1], 1);
+
+	mine_transaction(&nodes[0], &commitment_tx_b);
+	mine_transaction(&nodes[1], &commitment_tx_b);
+
+	// Provide the preimage now, such that we only claim from the holder commitment (since it's
+	// currently confirmed) and not the counterparty's.
+	get_monitor!(nodes[1], chan_id).provide_payment_preimage(
+		&payment_hash, &payment_preimage, &nodes[1].tx_broadcaster,
+		&LowerBoundedFeeEstimator(nodes[1].fee_estimator), &nodes[1].logger
+	);
+
+	let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+	assert_eq!(txn.len(), 1);
+	let htlc_success_tx = txn.pop().unwrap();
+	check_spends!(htlc_success_tx, commitment_tx_b);
+}
+
+#[test]
+fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterparty_commitment_reorg() {
+ // We detect a counterparty commitment confirm onchain, followed by a reorg and a
+ // confirmation of the previous (still unrevoked) counterparty commitment. Then, if we learn
+ // of the preimage for an HTLC in both commitments, test that we only claim the currently
+ // confirmed commitment.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Route an HTLC which we will claim onchain with the preimage.
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ // Obtain the current commitment, which will become the previous after a fee update.
+ let prev_commitment_a = &get_local_commitment_txn!(nodes[0], chan_id)[0];
+
+ *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 4;
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors(&nodes[0], 1);
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ let (update_fee, commit_sig) = if let MessageSendEvent::UpdateHTLCs { node_id, mut updates } = msg_events.pop().unwrap() {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ (updates.update_fee.take().unwrap(), updates.commitment_signed)
+ } else {
+ panic!("Unexpected message send event");
+ };
+
+ // Handle the fee update on the other side, but don't send the last RAA such that the previous
+ // commitment is still valid (unrevoked).
+ nodes[1].node().handle_update_fee(&nodes[0].node.get_our_node_id(), &update_fee);
+ let _last_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[0], commit_sig, false, true, false, true);
+
+ // Force close with the latest commitment, confirm it, and reorg it with the previous commitment.
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_added_monitors(&nodes[0], 1);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+
+ let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ let current_commitment_a = txn.pop().unwrap();
+ assert_ne!(current_commitment_a.txid(), prev_commitment_a.txid());
+ check_spends!(current_commitment_a, funding_tx);
+
+ mine_transaction(&nodes[0], ¤t_commitment_a);
+ mine_transaction(&nodes[1], ¤t_commitment_a);
+
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_added_monitors(&nodes[1], 1);
+ check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+
+ disconnect_blocks(&nodes[0], 1);
+ disconnect_blocks(&nodes[1], 1);
+
+ mine_transaction(&nodes[0], &prev_commitment_a);
+ mine_transaction(&nodes[1], &prev_commitment_a);
+
+ // Provide the preimage now, such that we only claim from the previous commitment (since it's
+ // currently confirmed) and not the latest.
+ get_monitor!(nodes[1], chan_id).provide_payment_preimage(
+ &payment_hash, &payment_preimage, &nodes[1].tx_broadcaster,
+ &LowerBoundedFeeEstimator(nodes[1].fee_estimator), &nodes[1].logger
+ );
+
+ let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ let htlc_preimage_tx = txn.pop().unwrap();
+ check_spends!(htlc_preimage_tx, prev_commitment_a);
+ // Make sure it was indeed a preimage claim and not a revocation claim since the previous
+ // commitment (still unrevoked) is the currently confirmed closing transaction.
+ assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
+}
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 100_000, 0, None).unwrap();
let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- // P2WSH
+ // Create a dummy P2WPKH script
let script = Builder::new().push_int(0)
.push_slice(&[0; 20])
.into_script();
inbound_scid_alias: None,
channel_value_satoshis: 0,
user_channel_id: 0,
+ balance_msat: 0,
outbound_capacity_msat,
next_outbound_htlc_limit_msat: outbound_capacity_msat,
next_outbound_htlc_minimum_msat: 0,
outbound_scid_alias: None,
channel_value_satoshis: 10_000_000_000,
user_channel_id: 0,
+ balance_msat: 10_000_000_000,
outbound_capacity_msat: 10_000_000_000,
next_outbound_htlc_minimum_msat: 0,
next_outbound_htlc_limit_msat: 10_000_000_000,
+++ /dev/null
-* The `AvailableBalances::balance_msat` field has been removed in favor of `ChannelMonitor::get_claimable_balances`. `ChannelDetails` serialized with versions of LDK >= 0.0.117 will have their `balance_msat` field set to `next_outbound_htlc_limit_msat` when read by versions of LDK prior to 0.0.117 (#2476).