}
}
+ /// On startup, it's possible we detect that some monitor updates have actually completed (and the
+ /// ChannelManager was simply stale). In that case, we should simply drop them, which we do
+ /// here after logging them.
+ pub fn on_startup_drop_completed_blocked_mon_updates_through<L: Logger>(&mut self, logger: &L, loaded_mon_update_id: u64) {
+ let channel_id = self.context.channel_id();
+ self.context.blocked_monitor_updates.retain(|update| {
+ if update.update.update_id <= loaded_mon_update_id {
+ log_info!(
+ logger,
+ "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager",
+ update.update.update_id,
+ channel_id,
+ );
+ false
+ } else {
+ true
+ }
+ });
+ }
+
pub fn blocked_monitor_updates_pending(&self) -> usize {
self.context.blocked_monitor_updates.len()
}
}
}
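
The new method above boils down to a single `Vec::retain` pass: any blocked update whose `update_id` is at or below the id the `ChannelMonitor` was loaded at has already been applied and can be discarded. A minimal standalone sketch of that filtering logic, using a hypothetical simplified `PendingUpdate` type and free function in place of the real blocked-update entries on `ChannelContext`:

struct PendingUpdate { update_id: u64 }

// Sketch only: the real logic lives on `Channel` and also logs each drop.
fn drop_completed_through(blocked: &mut Vec<PendingUpdate>, loaded_mon_update_id: u64) {
	// Updates the persisted ChannelMonitor has already seen are stale copies
	// held by a stale ChannelManager; keep only the genuinely pending ones.
	blocked.retain(|update| update.update_id > loaded_mon_update_id);
}

fn main() {
	let mut blocked = vec![PendingUpdate { update_id: 3 }, PendingUpdate { update_id: 5 }];
	// The monitor on disk was loaded at update_id 4, so only update 5 remains blocked.
	drop_completed_through(&mut blocked, 4);
	assert_eq!(blocked.len(), 1);
	assert_eq!(blocked[0].update_id, 5);
}
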
} else {
- log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+ channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
+ log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update_id {} with {} blocked updates",
&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
- monitor.get_latest_update_id());
+ monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
do_test_monitor_claims_with_random_signatures(true, false);
do_test_monitor_claims_with_random_signatures(true, true);
}
+
+#[test]
+fn test_event_replay_causing_monitor_replay() {
+ // In LDK 0.0.121 there was a bug where a `PaymentSent` event could cause an RAA
+ // `ChannelMonitorUpdate` to be held. If the node then restarted after both the `PaymentSent`
+ // event and the `ChannelMonitorUpdate` had completed, but without the `ChannelManager` having
+ // been persisted, we'd replay the `ChannelMonitorUpdate` on restart (which is fine, but
+ // triggered a safety panic).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let persister;
+ let new_chain_monitor;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let node_deserialized;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+
+ let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+
+ do_claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
+
+ // At this point the `PaymentSent` event has not been processed but the full commitment signed
+ // dance has completed.
+ let serialized_channel_manager = nodes[0].node.encode();
+
+ // Now process the `PaymentSent` to get the final RAA `ChannelMonitorUpdate`, checking that it
+ // resulted in a `ChannelManager` persistence request.
+ nodes[0].node.get_and_clear_needs_persistence();
+ expect_payment_sent(&nodes[0], payment_preimage, None, true, true /* expected post-event monitor update */);
+ assert!(nodes[0].node.get_and_clear_needs_persistence());
+
+ let serialized_monitor = get_monitor!(nodes[0], chan.2).encode();
+ reload_node!(nodes[0], &serialized_channel_manager, &[&serialized_monitor], persister, new_chain_monitor, node_deserialized);
+
+ // Expect the `PaymentSent` to get replayed, this time without the duplicate monitor update
+ expect_payment_sent(&nodes[0], payment_preimage, None, false, false /* expected post-event monitor update */);
+}