X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fmain%2Fjava%2Forg%2Fldk%2Fstructs%2FBackgroundProcessor.java;h=cb7674ab79d603e48ba555ad1ab6926fcddf9b6c;hb=07d5d868dfe064aadb28a7f7ca6002c16be9723d;hp=e66111bf2497de877dfbb1a4caf2d89874a0bce7;hpb=64bcaa6a2a2f05653c14b9cb8bb97ab2480eaaa5;p=ldk-java diff --git a/src/main/java/org/ldk/structs/BackgroundProcessor.java b/src/main/java/org/ldk/structs/BackgroundProcessor.java index e66111bf..cb7674ab 100644 --- a/src/main/java/org/ldk/structs/BackgroundProcessor.java +++ b/src/main/java/org/ldk/structs/BackgroundProcessor.java @@ -16,10 +16,10 @@ import javax.annotation.Nullable; * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so, * writing it to disk/backups by invoking the callback given to it at startup. * [`ChannelManager`] persistence should be done in the background. - * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`] - * at the appropriate intervals. - * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to - * [`BackgroundProcessor::start`]). + * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`] + * and [`PeerManager::timer_tick_occurred`] at the appropriate intervals. + * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a + * [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]). * * It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied * upon as doing so may result in high latency. @@ -32,7 +32,9 @@ import javax.annotation.Nullable; * unilateral chain closure fees are at risk. * * [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor - * [`Event`]: lightning::util::events::Event + * [`Event`]: lightning::events::Event + * [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred + * [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events * BackgroundProcessor will immediately stop on drop. It should be stored until shutdown. */ @SuppressWarnings("unchecked") // We correctly assign various generic arrays @@ -49,17 +51,21 @@ public class BackgroundProcessor extends CommonBase { * documentation]. * * The thread runs indefinitely unless the object is dropped, [`stop`] is called, or - * `persist_manager` returns an error. In case of an error, the error is retrieved by calling + * [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling * either [`join`] or [`stop`]. * * # Data Persistence * - * `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or + * [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or * uploading to one or more backup services. See [`ChannelManager::write`] for writing out a - * [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's + * [`ChannelManager`]. See the `lightning-persister` crate for LDK's * provided implementation. * - * Typically, users should either implement [`ChannelManagerPersister`] to never return an + * [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if + * [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`]. + * See the `lightning-persister` crate for LDK's provided implementation. 
+	 *
+	 * Typically, users should either implement [`Persister::persist_manager`] to never return an
 	 * error or call [`join`] and handle any error that may arise. For the latter case,
 	 * `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
 	 *
@@ -68,37 +74,45 @@ public class BackgroundProcessor extends CommonBase {
 	 * `event_handler` is responsible for handling events that users should be notified of (e.g.,
 	 * payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
 	 * functionality implemented by other handlers.
-	 * [`NetGraphMsgHandler`] if given will update the [`NetworkGraph`] based on payment failures.
+	 * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
+	 *
+	 * # Rapid Gossip Sync
+	 *
+	 * If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
+	 * to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
+	 * until the [`RapidGossipSync`] instance completes its first sync.
 	 *
 	 * [top-level documentation]: BackgroundProcessor
 	 * [`join`]: Self::join
 	 * [`stop`]: Self::stop
 	 * [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
 	 * [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
-	 * [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
-	 * [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
-	 *
-	 * Note that net_graph_msg_handler (or a relevant inner pointer) may be NULL or all-0s to represent None
+	 * [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
+	 * [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
+	 * [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
+	 * [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
 	 */
-	public static BackgroundProcessor start(ChannelManagerPersister persister, EventHandler event_handler, ChainMonitor chain_monitor, ChannelManager channel_manager, @Nullable NetGraphMsgHandler net_graph_msg_handler, PeerManager peer_manager, Logger logger) {
-		long ret = bindings.BackgroundProcessor_start(persister == null ? 0 : persister.ptr, event_handler == null ? 0 : event_handler.ptr, chain_monitor == null ? 0 : chain_monitor.ptr & ~1, channel_manager == null ? 0 : channel_manager.ptr & ~1, net_graph_msg_handler == null ? 0 : net_graph_msg_handler.ptr & ~1, peer_manager == null ? 0 : peer_manager.ptr & ~1, logger == null ? 0 : logger.ptr);
+	public static BackgroundProcessor start(org.ldk.structs.Persister persister, org.ldk.structs.EventHandler event_handler, org.ldk.structs.ChainMonitor chain_monitor, org.ldk.structs.ChannelManager channel_manager, org.ldk.structs.GossipSync gossip_sync, org.ldk.structs.PeerManager peer_manager, org.ldk.structs.Logger logger, org.ldk.structs.Option_WriteableScoreZ scorer) {
+		long ret = bindings.BackgroundProcessor_start(persister.ptr, event_handler.ptr, chain_monitor == null ? 0 : chain_monitor.ptr, channel_manager == null ? 0 : channel_manager.ptr, gossip_sync.ptr, peer_manager == null ? 0 : peer_manager.ptr, logger.ptr, scorer.ptr);
 		Reference.reachabilityFence(persister);
 		Reference.reachabilityFence(event_handler);
 		Reference.reachabilityFence(chain_monitor);
 		Reference.reachabilityFence(channel_manager);
-		Reference.reachabilityFence(net_graph_msg_handler);
+		Reference.reachabilityFence(gossip_sync);
 		Reference.reachabilityFence(peer_manager);
 		Reference.reachabilityFence(logger);
+		Reference.reachabilityFence(scorer);
 		if (ret >= 0 && ret <= 4096) { return null; }
-		BackgroundProcessor ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new BackgroundProcessor(null, ret); }
-		ret_hu_conv.ptrs_to.add(ret_hu_conv);
-		ret_hu_conv.ptrs_to.add(persister);
-		ret_hu_conv.ptrs_to.add(event_handler);
-		ret_hu_conv.ptrs_to.add(chain_monitor);
-		ret_hu_conv.ptrs_to.add(channel_manager);
-		ret_hu_conv.ptrs_to.add(net_graph_msg_handler);
-		ret_hu_conv.ptrs_to.add(peer_manager);
-		ret_hu_conv.ptrs_to.add(logger);
+		org.ldk.structs.BackgroundProcessor ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new org.ldk.structs.BackgroundProcessor(null, ret); }
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(ret_hu_conv); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(persister); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(event_handler); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(chain_monitor); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(channel_manager); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(gossip_sync); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(peer_manager); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(logger); };
+		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(scorer); };
 		return ret_hu_conv;
 	}
 
@@ -113,12 +127,12 @@ public class BackgroundProcessor extends CommonBase {
 	 *
 	 * [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
 	 */
-	public Result_NoneErrorZ join() {
+	public Result_NoneIOErrorZ join() {
 		long ret = bindings.BackgroundProcessor_join(this.ptr);
 		Reference.reachabilityFence(this);
 		if (ret >= 0 && ret <= 4096) { return null; }
-		Result_NoneErrorZ ret_hu_conv = Result_NoneErrorZ.constr_from_ptr(ret);
-		this.ptrs_to.add(this);
+		Result_NoneIOErrorZ ret_hu_conv = Result_NoneIOErrorZ.constr_from_ptr(ret);
+		if (this != null) { this.ptrs_to.add(this); };
 		// Due to rust's strict-ownership memory model, in some cases we need to "move"
 		// an object to pass exclusive ownership to the function being called.
 		// In most cases, we avoid this being visible in GC'd languages by cloning the object
@@ -141,12 +155,12 @@ public class BackgroundProcessor extends CommonBase {
 	 *
 	 * [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
 	 */
-	public Result_NoneErrorZ stop() {
+	public Result_NoneIOErrorZ stop() {
 		long ret = bindings.BackgroundProcessor_stop(this.ptr);
 		Reference.reachabilityFence(this);
 		if (ret >= 0 && ret <= 4096) { return null; }
-		Result_NoneErrorZ ret_hu_conv = Result_NoneErrorZ.constr_from_ptr(ret);
-		this.ptrs_to.add(this);
+		Result_NoneIOErrorZ ret_hu_conv = Result_NoneIOErrorZ.constr_from_ptr(ret);
+		if (this != null) { this.ptrs_to.add(this); };
 		// Due to rust's strict-ownership memory model, in some cases we need to "move"
 		// an object to pass exclusive ownership to the function being called.
 		// In most cases, we avoid this being visible in GC'd languages by cloning the object
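
Usage sketch (editor's note, not part of the patch): the snippet below shows how application code might call the post-change `start` signature. The `persister`, `event_handler`, `chain_monitor`, `channel_manager`, `peer_manager` and `logger` objects are assumed to have been constructed during node setup and are not shown, and the wrapper class/method names are hypothetical; `GossipSync.none()` and `Option_WriteableScoreZ.none()` are used to opt out of the gossip sync and scorer persistence that the new `gossip_sync` and `scorer` parameters make explicit.

	import org.ldk.structs.*;

	public class BackgroundProcessorSketch {
		// All six parameters are assumed to be built elsewhere during node init.
		public static BackgroundProcessor startBackgroundProcessing(
				Persister persister, EventHandler event_handler, ChainMonitor chain_monitor,
				ChannelManager channel_manager, PeerManager peer_manager, Logger logger) {
			// Minimal configuration: no P2P/rapid gossip sync and no scorer.
			GossipSync gossip_sync = GossipSync.none();
			Option_WriteableScoreZ scorer = Option_WriteableScoreZ.none();
			return BackgroundProcessor.start(persister, event_handler, chain_monitor,
					channel_manager, gossip_sync, peer_manager, logger, scorer);
		}

		public static void shutdown(BackgroundProcessor background_processor) {
			// stop() joins the background thread and reports any persistence failure
			// as a Result_NoneIOErrorZ, per the diff above.
			Result_NoneIOErrorZ stop_res = background_processor.stop();
			if (stop_res instanceof Result_NoneIOErrorZ.Result_NoneIOErrorZ_Err) {
				// Handle the error; `start` must be called again to resume processing.
			}
		}
	}

Note also that the old `& ~1` pointer-masking in the `bindings.BackgroundProcessor_start` call is gone in the new code, which appears to reflect the bindings no longer tagging ownership in the low bit of passed pointers; liveness of the passed objects is instead maintained via the `ptrs_to` lists and `Reference.reachabilityFence` calls.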