-use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
use lightning::events::ClosureReason;
use lightning::ln::functional_test_utils::{
connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block,
check_added_monitors!(nodes[1], 1);
// Make sure everything is persisted as expected after close.
- check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
+ check_persisted_data!(11);
}
/// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
///
- /// The only instances we allow where update_id values are not strictly increasing have a
- /// special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. This update ID is used for updates that
- /// will force close the channel by broadcasting the latest commitment transaction or
- /// special post-force-close updates, like providing preimages necessary to claim outputs on the
- /// broadcast commitment transaction. See its docs for more details.
+ /// Note that for [`ChannelMonitorUpdate`]s generated after channel closure by LDK versions
+ /// prior to 0.1, this value may be [`u64::MAX`]. In that case, multiple updates may appear
+ /// with the same ID, and all of them should be replayed.
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
pub channel_id: Option<ChannelId>,
}
-/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
-///
-/// (1) attempting to force close the channel by broadcasting our latest commitment transaction or
-/// (2) providing a preimage (after the channel has been force closed) from a forward link that
-/// allows us to spend an HTLC output on this channel's (the backward link's) broadcasted
-/// commitment transaction.
-///
-/// No other [`ChannelMonitorUpdate`]s are allowed after force-close.
-pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
+/// LDK prior to 0.1 used this constant as the [`ChannelMonitorUpdate::update_id`] for any
+/// [`ChannelMonitorUpdate`]s which were generated after the channel was closed.
+const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
impl Writeable for ChannelMonitorUpdate {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
/// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
/// ChannelMonitor.
+ ///
+ /// Note that for channels closed by LDK versions prior to 0.1, this may return [`u64::MAX`].
pub fn get_latest_update_id(&self) -> u64 {
self.inner.lock().unwrap().get_latest_update_id()
}
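// Illustrative sketch (not part of the change above): a caller such as a custom
// `Persist` implementation might branch on the legacy marker when deciding whether to
// write a delta update or re-persist the whole monitor, mirroring the consolidation
// rule used by `MonitorUpdatingPersister` further below. This helper is hypothetical
// and assumes `max_pending_updates > 0`.
fn should_persist_full_monitor(latest_update_id: u64, max_pending_updates: u64) -> bool {
// Pre-0.1 LDK stamped post-close updates with u64::MAX, so such monitors always get a
// full re-persist; otherwise consolidate at every `max_pending_updates`th update.
latest_update_id == u64::MAX || latest_update_id % max_pending_updates == 0
}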
F::Target: FeeEstimator,
L::Target: Logger,
{
- if self.latest_update_id == CLOSED_CHANNEL_UPDATE_ID && updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
- log_info!(logger, "Applying post-force-closed update to monitor {} with {} change(s).",
+ if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
+ log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).",
log_funding_info!(self), updates.updates.len());
- } else if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
- log_info!(logger, "Applying force close update to monitor {} with {} change(s).",
+ } else if updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
+ log_info!(logger, "Applying pre-0.1 force close update to monitor {} with {} change(s).",
log_funding_info!(self), updates.updates.len());
} else {
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).",
// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
// them as well.
- if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
+ if updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID || self.lockdown_from_offchain {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
// We should have already seen a `ChannelForceClosed` update if we're trying to
// provide a preimage at this point.
ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
- debug_assert_eq!(self.latest_update_id, CLOSED_CHANNEL_UPDATE_ID),
+ debug_assert!(self.lockdown_from_offchain),
_ => {
log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
self.counterparty_commitment_txs_from_update(updates);
}
- // If the updates succeeded and we were in an already closed channel state, then there's no
- // need to refuse any updates we expect to receive afer seeing a confirmed commitment.
- if ret.is_ok() && updates.update_id == CLOSED_CHANNEL_UPDATE_ID && self.latest_update_id == updates.update_id {
- return Ok(());
- }
-
self.latest_update_id = updates.update_id;
- // Refuse updates after we've detected a spend onchain, but only if we haven't processed a
- // force closed monitor update yet.
- if ret.is_ok() && self.funding_spend_seen && self.latest_update_id != CLOSED_CHANNEL_UPDATE_ID {
+ // Refuse updates after we've detected a spend onchain (or if the channel was otherwise
+ // closed), but only if the update isn't the kind of update we expect to see after channel
+ // closure.
+ let mut is_pre_close_update = false;
+ for update in updates.updates.iter() {
+ match update {
+ ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { .. }
+ |ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. }
+ |ChannelMonitorUpdateStep::ShutdownScript { .. }
+ |ChannelMonitorUpdateStep::CommitmentSecret { .. } =>
+ is_pre_close_update = true,
+ // After a channel is closed, we no longer communicate with our peer about it, so the
+ // only updates we expect are a new preimage (learned via a different channel) or
+ // notice that the channel has been force-closed. All other updates are generated
+ // while talking to our peer.
+ ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+ ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
+ }
+ }
+
+ if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
Err(())
} else { ret }
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if !self.channel_state.is_pre_funded_state() {
- self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+ self.latest_monitor_update_id += 1;
Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
counterparty_node_id: Some(self.counterparty_node_id),
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::events;
use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
/// will remove a preimage that needs to be durably in an upstream channel first), we put an
/// entry here to note that the channel with the key's ID is blocked on a set of actions.
actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
+ /// The latest [`ChannelMonitor::get_latest_update_id`] value for all closed channels as they
+ /// exist on-disk/in our [`chain::Watch`]. This *ignores* all pending updates not yet applied
+ /// in [`ChannelManager::pending_background_events`].
+ ///
+ /// If there are any updates pending in [`Self::in_flight_monitor_updates`] this will contain
+ /// the highest `update_id` of all the pending in-flight updates.
+ closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
/// The peer is currently connected (i.e. we've seen a
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
/// [`ChannelMessageHandler::peer_disconnected`].
)
&& self.monitor_update_blocked_actions.is_empty()
&& self.in_flight_monitor_updates.is_empty()
+ && self.closed_channel_monitor_update_ids.is_empty()
}
// Returns a count of all channels we have with this peer, including unfunded channels.
/// [`ChannelMonitor`]/channel funding transaction) to begin with.
macro_rules! update_maps_on_chan_removal {
($self: expr, $peer_state: expr, $channel_context: expr) => {{
+ // If there's a possibility that we need to generate further monitor updates for this
+ // channel, we need to store the last update_id of it. However, we don't want to insert
+ // into the map (which prevents the `PeerState` from being cleaned up) for channels that
+ // never even got confirmations (which would open us up to DoS attacks).
+ let mut update_id = $channel_context.get_latest_monitor_update_id();
+ if $channel_context.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth() == Some(0) || update_id > 1 {
+ // There may be some pending background events which we have to ignore when setting the
+ // latest update ID.
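+ // e.g. if this channel's latest monitor update_id is 7 but an update with id 6 is
+ // still pending replay below, we record 5, so that applying the pending update later
+ // bumps the stored value to 6 and post-close update_ids stay strictly increasing.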
+ for event in $self.pending_background_events.lock().unwrap().iter() {
+ match event {
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, channel_id, update, .. } => {
+ if *channel_id == $channel_context.channel_id() && *counterparty_node_id == $channel_context.get_counterparty_node_id() {
+ update_id = cmp::min(update_id, update.update_id - 1);
+ }
+ },
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(..) => {
+ // This is only generated for very old channels which were already closed
+ // on startup, so it should never be present for a channel that is closing
+ // here.
+ },
+ BackgroundEvent::MonitorUpdatesComplete { .. } => {},
+ }
+ }
+ let chan_id = $channel_context.channel_id();
+ $peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
+ }
if let Some(outpoint) = $channel_context.get_funding_txo() {
$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
}
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
+ /// Ensures any saved latest ID in [`PeerState::closed_channel_monitor_update_ids`] is updated,
+ /// then applies the provided [`ChannelMonitorUpdate`].
+ #[must_use]
+ fn apply_post_close_monitor_update(
+ &self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
+ mut monitor_update: ChannelMonitorUpdate,
+ ) -> ChannelMonitorUpdateStatus {
+ // Note that there may be some post-close updates which need to be well-ordered with
+ // respect to the `update_id`, so we hold the per-peer lock covering
+ // `closed_channel_monitor_update_ids` here (and also make sure the `monitor_update`
+ // we're applying has the right `update_id`).
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
+ .expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
+ .lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(channel_id) {
+ hash_map::Entry::Occupied(mut chan_phase) => {
+ if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
+ let in_flight = handle_new_monitor_update!(self, funding_txo,
+ monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
+ return if in_flight { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed };
+ } else {
+ debug_assert!(false, "We shouldn't have an update for a non-funded channel");
+ }
+ },
+ hash_map::Entry::Vacant(_) => {},
+ }
+ match peer_state.closed_channel_monitor_update_ids.entry(channel_id) {
+ btree_map::Entry::Vacant(entry) => {
+ let is_closing_unupdated_monitor = monitor_update.update_id == 1
+ && monitor_update.updates.len() == 1
+ && matches!(&monitor_update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. });
+ // If the ChannelMonitorUpdate is closing a channel that never got past initial
+ // funding (to have any commitment updates), we'll skip inserting in
+ // `update_maps_on_chan_removal`, allowing us to avoid keeping around the PeerState
+ // for that peer. In that specific case we expect no entry in the map here. In any
+ // other case this is a bug, but in production we go ahead and recover by
+ // inserting the update_id and hoping it's right.
+ debug_assert!(is_closing_unupdated_monitor, "Expected closing monitor against an unused channel, got {:?}", monitor_update);
+ if !is_closing_unupdated_monitor {
+ entry.insert(monitor_update.update_id);
+ }
+ },
+ btree_map::Entry::Occupied(entry) => {
+ // If we're running in a threaded environment it's possible we generate updates for
+ // a channel that is closing, then apply some preimage update, then go back and
+ // apply the close monitor update here. In order to ensure the updates are still
+ // well-ordered, we have to use the `closed_channel_monitor_update_ids` map to
+ // override the `update_id`, taking care to handle old monitors where the
+ // `latest_update_id` is already `u64::MAX`.
+ let latest_update_id = entry.into_mut();
+ *latest_update_id = latest_update_id.saturating_add(1);
+ monitor_update.update_id = *latest_update_id;
+ }
+ }
+ self.chain_monitor.update_channel(funding_txo, &monitor_update)
+ }
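// Illustrative sketch (not part of the change above): the post-close id selection
// boils down to bumping the stored latest id, where `saturating_add` keeps legacy
// monitors whose latest id is already `u64::MAX` from wrapping back to 0. This
// standalone helper is hypothetical and only mirrors the `Occupied` branch.
fn next_post_close_update_id(latest_applied: u64) -> u64 {
// e.g. next_post_close_update_id(5) == 6, next_post_close_update_id(u64::MAX) == u64::MAX
latest_applied.saturating_add(1)
}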
+
/// When a channel is removed, two things need to happen:
/// (a) [`update_maps_on_chan_removal`] must be called in the same `per_peer_state` lock as
/// the channel-closing action,
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
// ignore the result here.
- let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
+ let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
}
let mut shutdown_results = Vec::new();
if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
- let mut updated_chan = false;
- {
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(channel_id) {
- hash_map::Entry::Occupied(mut chan_phase) => {
- if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
- updated_chan = true;
- handle_new_monitor_update!(self, funding_txo, update.clone(),
- peer_state_lock, peer_state, per_peer_state, chan);
- } else {
- debug_assert!(false, "We shouldn't have an update for a non-funded channel");
- }
- },
- hash_map::Entry::Vacant(_) => {},
- }
- }
- }
- if !updated_chan {
- // TODO: Track this as in-flight even though the channel is closed.
- let _ = self.chain_monitor.update_channel(funding_txo, &update);
- }
+ // The monitor update will be replayed on startup if it doesn't complete, so there's
+ // no need to track whether it completes here.
+ let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
},
BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
let per_peer_state = self.per_peer_state.read().unwrap();
}
}
}
+
let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
+ update_id: 0, // apply_post_close_monitor_update will set the right value
counterparty_node_id: prev_hop.counterparty_node_id,
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
if !during_init {
// We update the ChannelMonitor on the backward link, after
// receiving an `update_fulfill_htlc` from the forward link.
- let update_res = self.chain_monitor.update_channel(prev_hop.funding_txo, &preimage_update);
+ let update_res = self.apply_post_close_monitor_update(counterparty_node_id, prev_hop.channel_id, prev_hop.funding_txo, preimage_update);
if update_res != ChannelMonitorUpdateStatus::Completed {
// TODO: This needs to be handled somehow - if we receive a monitor update
// with a preimage we *must* somehow manage to propagate it to the upstream
};
self.pending_background_events.lock().unwrap().push(event);
}
+
// Note that we do process the completion action here. This totally could be a
// duplicate claim, but we have no way of knowing without interrogating the
// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
+ closed_channel_monitor_update_ids: BTreeMap::new(),
is_connected: false,
}));
let mut peer_state = peer_state_mutex.lock().unwrap();
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
+ closed_channel_monitor_update_ids: BTreeMap::new(),
is_connected: true,
}));
},
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
+ closed_channel_monitor_update_ids: BTreeMap::new(),
is_connected: false,
}
};
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
+ if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update {
+ // Our channel information is out of sync with the `ChannelMonitor`, so
+ // force the close update to use the `ChannelMonitor`'s latest update_id
+ // plus one.
+ let latest_update_id = monitor.get_latest_update_id();
+ update.update_id = latest_update_id.saturating_add(1);
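+ // Seed the map below with the monitor's current latest id (not the close
+ // update's id): applying the pending close update later bumps the stored
+ // value by one, matching the `saturating_add(1)` above.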
+ per_peer_state.entry(counterparty_node_id)
+ .or_insert_with(|| Mutex::new(empty_peer_state()))
+ .lock().unwrap()
+ .closed_channel_monitor_update_ids.entry(channel_id)
+ .and_modify(|v| *v = cmp::max(latest_update_id, *v))
+ .or_insert(latest_update_id);
+
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, channel_id, update
});
let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
&channel_id);
- let monitor_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: monitor.get_latest_update_id().saturating_add(1),
counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
channel_id: Some(monitor.channel_id()),
update: monitor_update,
};
close_background_events.push(update);
+
+ per_peer_state.entry(counterparty_node_id)
+ .or_insert_with(|| Mutex::new(empty_peer_state()))
+ .lock().unwrap()
+ .closed_channel_monitor_update_ids.entry(monitor.channel_id())
+ .and_modify(|v| *v = cmp::max(monitor.get_latest_update_id(), *v))
+ .or_insert(monitor.get_latest_update_id());
} else {
// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
// Thus, we assume that it has no pending HTLCs and we will not need to
// generate a `ChannelMonitorUpdate` for it aside from this
// `ChannelForceClosed` one.
+ monitor_update.update_id = u64::MAX;
close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
}
}
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+ let chan_id = nodes[0].node.list_channels()[0].channel_id;
let error_message = "Channel force-closed";
- nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
- check_closed_broadcast!(nodes[0], true);
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000);
{
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
}
}
+ #[test]
+ fn test_drop_peers_when_removing_unfunded_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
+ match events[0] {
+ Event::FundingGenerationReady { .. } => {}
+ _ => panic!("Unexpected event {:?}", events),
+ }
+
+ nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+ check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
+ check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);
+
+ // At this point the state for the peers should have been removed.
+ assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
+ }
+
#[test]
fn bad_inbound_payment_hash() {
// Add coverage for checking that a user-provided payment hash matches the payment secret.
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
-use crate::chain::channelmonitor::{Balance, CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
assert_eq!(monitor_updates_1.len(), 1);
- assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+ assert_eq!(monitor_updates_1[0].updates.len(), 1);
+ assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
}
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
assert_eq!(monitor_updates_1.len(), 1);
- assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+ assert_eq!(monitor_updates_1[0].updates.len(), 1);
+ assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
assert_eq!(monitor_updates_2.len(), 1);
- assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+ assert_eq!(monitor_updates_2[0].updates.len(), 1);
+ assert!(matches!(monitor_updates_2[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
}
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
match msg_events[0] {
use crate::chain::{ChannelMonitorUpdateStatus, Watch};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep};
use crate::sign::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
assert_eq!(monitor_updates_1.len(), 1);
- assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+ assert_eq!(monitor_updates_1[0].updates.len(), 1);
+ assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
}
// The funding transaction should not have been broadcast, but we broadcast the force-close
use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::chainmonitor::Persist;
-use crate::chain::channelmonitor::{
- ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID,
-};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use crate::chain::transaction::OutPoint;
use crate::ln::channelmanager::AChannelManager;
use crate::routing::gossip::NetworkGraph;
/// - No full monitor is found in [`KVStore`]
/// - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
/// - LDK commands re-persisting the entire monitor through this function, specifically when
- /// `update` is `None`.
- /// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
+ /// `update` is `None`.
+ /// - The update is at [`u64::MAX`], indicating an update generated by pre-0.1 LDK.
fn update_persisted_channel(
&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
monitor: &ChannelMonitor<ChannelSigner>,
) -> chain::ChannelMonitorUpdateStatus {
+ const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
if let Some(update) = update {
- if update.update_id != CLOSED_CHANNEL_UPDATE_ID
- && update.update_id % self.maximum_pending_updates != 0
- {
+ let persist_update = update.update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID
+ && update.update_id % self.maximum_pending_updates != 0;
+ if persist_update {
let monitor_name = MonitorName::from(funding_txo);
let update_name = UpdateName::from(update.update_id);
match self.kv_store.write(
// In case of channel-close monitor update, we need to read old monitor before persisting
// the new one in order to determine the cleanup range.
let maybe_old_monitor = match monitor.get_latest_update_id() {
- CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
+ LEGACY_CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
_ => None,
};
let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
- let cleanup_range =
- if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
- // If there is an error while reading old monitor, we skip clean up.
- maybe_old_monitor.map(|(_, ref old_monitor)| {
- let start = old_monitor.get_latest_update_id();
- // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
- let end = cmp::min(
- start.saturating_add(self.maximum_pending_updates),
- CLOSED_CHANNEL_UPDATE_ID - 1,
- );
- (start, end)
- })
- } else {
- let end = monitor.get_latest_update_id();
- let start = end.saturating_sub(self.maximum_pending_updates);
- Some((start, end))
- };
+ let channel_closed_legacy =
+ monitor.get_latest_update_id() == LEGACY_CLOSED_CHANNEL_UPDATE_ID;
+ let cleanup_range = if channel_closed_legacy {
+ // If there is an error while reading the old monitor, we skip cleanup.
+ maybe_old_monitor.map(|(_, ref old_monitor)| {
+ let start = old_monitor.get_latest_update_id();
+ // We never persist an update with the legacy closed update_id
+ let end = cmp::min(
+ start.saturating_add(self.maximum_pending_updates),
+ LEGACY_CLOSED_CHANNEL_UPDATE_ID - 1,
+ );
+ (start, end)
+ })
+ } else {
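+ // e.g. with maximum_pending_updates == 5 (illustrative) and a monitor that just
+ // consolidated at update_id 15, stale update files in the range [10, 15] are cleaned
+ // up below.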
+ let end = monitor.get_latest_update_id();
+ let start = end.saturating_sub(self.maximum_pending_updates);
+ Some((start, end))
+ };
if let Some((start, end)) = cleanup_range {
self.cleanup_in_range(monitor_name, start, end);
// check that when we read it, we got the right update id
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
- // if the CM is at consolidation threshold, ensure no updates are stored.
let monitor_name = MonitorName::from(mon.get_funding_txo().0);
- if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
- || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID
- {
- assert_eq!(
- persister_0
- .kv_store
- .list(
- CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
- monitor_name.as_str()
- )
- .unwrap()
- .len(),
- 0,
- "updates stored when they shouldn't be in persister 0"
- );
- }
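+ // Worked example (illustrative numbers): with maximum_pending_updates == 5 and a
+ // latest update_id of 7, the last full monitor was written at update 5, so exactly
+ // 7 % 5 == 2 update files should remain on disk.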
+ assert_eq!(
+ persister_0
+ .kv_store
+ .list(
+ CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+ monitor_name.as_str()
+ )
+ .unwrap()
+ .len() as u64,
+ mon.get_latest_update_id() % persister_0_max_pending_updates,
+ "Wrong number of updates stored in persister 0",
+ );
}
persisted_chan_data_1 =
persister_1.read_all_channel_monitors_with_updates().unwrap();
for (_, mon) in persisted_chan_data_1.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
let monitor_name = MonitorName::from(mon.get_funding_txo().0);
- // if the CM is at consolidation threshold, ensure no updates are stored.
- if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
- || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID
- {
- assert_eq!(
- persister_1
- .kv_store
- .list(
- CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
- monitor_name.as_str()
- )
- .unwrap()
- .len(),
- 0,
- "updates stored when they shouldn't be in persister 1"
- );
- }
+ assert_eq!(
+ persister_1
+ .kv_store
+ .list(
+ CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+ monitor_name.as_str()
+ )
+ .unwrap()
+ .len() as u64,
+ mon.get_latest_update_id() % persister_1_max_pending_updates,
+ "Wrong number of updates stored in persister 1",
+ );
}
};
}
check_added_monitors!(nodes[1], 1);
// Make sure everything is persisted as expected after close.
- check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
-
- // Make sure the expected number of stale updates is present.
- let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
- let (_, monitor) = &persisted_chan_data[0];
- let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
- // The channel should have 0 updates, as it wrote a full monitor and consolidated.
- assert_eq!(
- persister_0
- .kv_store
- .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())
- .unwrap()
- .len(),
- 0
- );
- assert_eq!(
- persister_1
- .kv_store
- .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())
- .unwrap()
- .len(),
- 0
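+ // The `+ 1` below accounts for the force-close update, which now carries a normal,
+ // strictly increasing update_id rather than the removed CLOSED_CHANNEL_UPDATE_ID
+ // sentinel.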
+ check_persisted_data!(
+ persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1
);
}
UpdateName::from(1).as_str()
)
.is_err());
-
- // Force close.
- let chan_id = nodes[0].node.list_channels()[0].channel_id;
- let node_id_1 = nodes[1].node.get_our_node_id();
- let err_msg = "Channel force-closed".to_string();
- nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap();
- let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
- check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000);
- check_closed_broadcast!(nodes[0], true);
- check_added_monitors!(nodes[0], 1);
-
- // Write an update near u64::MAX
- persister_0
- .kv_store
- .write(
- CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
- monitor_name.as_str(),
- UpdateName::from(u64::MAX - 1).as_str(),
- &[0u8; 1],
- )
- .unwrap();
-
- // Do the stale update cleanup
- persister_0.cleanup_stale_updates(false).unwrap();
-
- // Confirm the stale update is unreadable/gone
- assert!(persister_0
- .kv_store
- .read(
- CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
- monitor_name.as_str(),
- UpdateName::from(u64::MAX - 1).as_str()
- )
- .is_err());
}
fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool