1 package org.ldk.structs;
3 import org.ldk.impl.bindings;
4 import org.ldk.enums.*;
6 import java.util.Arrays;
7 import java.lang.ref.Reference;
8 import javax.annotation.Nullable;
11 * `Persist` defines behavior for persisting channel monitors: this could mean
12 * writing once to disk, and/or uploading to one or more backup services.
14 * Persistence can happen in one of two ways - synchronously completing before the trait method
15 * calls return or asynchronously in the background.
17 * # For those implementing synchronous persistence
19 * If persistence completes fully (including any relevant `fsync()` calls), the implementation
20 * should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
23 * If persistence fails for some reason, implementations should consider returning
24 * [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
25 * the background with [`ChainMonitor::list_pending_monitor_updates`] and
26 * [`ChainMonitor::get_monitor`].
28 * Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
29 * be marked as complete via [`ChainMonitor::channel_monitor_updated`].
31 * If at some point no further progress can be made towards persisting the pending updates, the
32 * node should simply shut down.
34 * If the persistence has failed and cannot be retried further (e.g. because of an outage),
35 * [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
36 * an immediate panic and future operations in LDK generally failing.
38 * # For those implementing asynchronous persistence
40 * All calls should generally spawn a background task and immediately return
41 * [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
42 * [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
43 * [`MonitorUpdateId`].
45 * Note that unlike the direct [`chain::Watch`] interface,
46 * [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
48 * If at some point no further progress can be made towards persisting a pending update, the node
49 * should simply shut down. Until then, the background task should either loop indefinitely, or
50 * persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
51 * and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
52 * monitor updates may be marked completed).
54 * # Using remote watchtowers
56 * Watchtowers may be updated as a part of an implementation of this trait, utilizing the async
57 * update process described above while the watchtower is being updated. The following methods are
 * provided for building transactions for a watchtower:
59 * [`ChannelMonitor::initial_counterparty_commitment_tx`],
60 * [`ChannelMonitor::counterparty_commitment_txs_from_update`],
61 * [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
62 * [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
64 * [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
65 * [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
67 @SuppressWarnings("unchecked") // We correctly assign various generic arrays
68 public class Persist extends CommonBase {
69 final bindings.LDKPersist bindings_instance;
70 Persist(Object _dummy, long ptr) { super(ptr); bindings_instance = null; }
	/**
	 * Builds a Persist backed by a Java {@link PersistInterface} implementation:
	 * registers the callback table with the native layer and pins {@code arg} in
	 * {@code ptrs_to} so the GC cannot collect it while the native side may still
	 * invoke it.
	 */
	private Persist(bindings.LDKPersist arg) {
		super(bindings.LDKPersist_new(arg));
		this.ptrs_to.add(arg);
		this.bindings_instance = arg;
	/** Frees the underlying native object (if still owned) when this wrapper is garbage-collected. */
	@Override @SuppressWarnings("deprecation")
	protected void finalize() throws Throwable {
		if (ptr != 0) { bindings.Persist_free(ptr); } super.finalize();
	/**
	 * Destroys the object, freeing associated resources. After this call, any access
	 * to this object may result in a SEGFAULT or worse.
	 *
	 * You should generally NEVER call this method. You should let the garbage collector
	 * do this for you when it finalizes objects. However, it may be useful for types
	 * which represent locks and should be closed immediately to avoid holding locks
	 * until the GC runs.
	 */
	public void destroy() {
		if (ptr != 0) { bindings.Persist_free(ptr); }
	/**
	 * Implement this interface to provide channel-monitor persistence; pass the
	 * implementation to {@link Persist#new_impl} to obtain a usable {@code Persist}.
	 */
	public static interface PersistInterface {
		/**
		 * Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
		 * called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
		 *
		 * The data can be stored any way you want, but the identifier provided by LDK is the
		 * channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
		 * and the stored channel data). Note that you **must** persist every new monitor to disk.
		 *
		 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
		 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
		 *
		 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
		 * and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
		 *
		 * [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
		 * [`Writeable::write`]: crate::util::ser::Writeable::write
		 */
		ChannelMonitorUpdateStatus persist_new_channel(OutPoint channel_funding_outpoint, ChannelMonitor data, MonitorUpdateId update_id);
		/**
		 * Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
		 * update.
		 *
		 * Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
		 * updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
		 * details.
		 *
		 * During blockchain synchronization operations, and in some rare cases, this may be called with
		 * no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
		 * Note that after the full [`ChannelMonitor`] is persisted any previous
		 * [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
		 * applied to the persisted [`ChannelMonitor`] as they were already applied.
		 *
		 * If an implementer chooses to persist the updates only, they need to make
		 * sure that all the updates are applied to the `ChannelMonitors` *before*
		 * the set of channel monitors is given to the `ChannelManager`
		 * deserialization routine. See [`ChannelMonitor::update_monitor`] for
		 * applying a monitor update to a monitor. If full `ChannelMonitors` are
		 * persisted, then there is no need to persist individual updates.
		 *
		 * Note that there could be a performance tradeoff between persisting complete
		 * channel monitors on every update vs. persisting only updates and applying
		 * them in batches. The size of each monitor grows `O(number of state updates)`
		 * whereas updates are small and `O(1)`.
		 *
		 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
		 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
		 *
		 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
		 * [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
		 * [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
		 *
		 * [`Writeable::write`]: crate::util::ser::Writeable::write
		 *
		 * Note that update (or a relevant inner pointer) may be NULL or all-0s to represent None
		 */
		ChannelMonitorUpdateStatus update_persisted_channel(OutPoint channel_funding_outpoint, ChannelMonitorUpdate update, ChannelMonitor data, MonitorUpdateId update_id);
		/**
		 * Prevents the channel monitor from being loaded on startup.
		 *
		 * Archiving the data in a backup location (rather than deleting it fully) is useful for
		 * hedging against data loss in case of unexpected failure.
		 */
		void archive_persisted_channel(OutPoint channel_funding_outpoint);
158 private static class LDKPersistHolder { Persist held; }
	/**
	 * Creates a {@code Persist} whose native callbacks delegate to the supplied
	 * Java {@link PersistInterface} implementation. Raw native handles passed into
	 * the callbacks are converted to Java wrapper objects before delegation.
	 */
	public static Persist new_impl(PersistInterface arg) {
		final LDKPersistHolder impl_holder = new LDKPersistHolder();
		impl_holder.held = new Persist(new bindings.LDKPersist() {
			@Override public ChannelMonitorUpdateStatus persist_new_channel(long channel_funding_outpoint, long data, long update_id) {
				// Values in [0, 4096] are not wrapped — presumably reserved sentinel/null
				// encodings rather than real pointers (TODO confirm against bindings).
				org.ldk.structs.OutPoint channel_funding_outpoint_hu_conv = null; if (channel_funding_outpoint < 0 || channel_funding_outpoint > 4096) { channel_funding_outpoint_hu_conv = new org.ldk.structs.OutPoint(null, channel_funding_outpoint); }
				if (channel_funding_outpoint_hu_conv != null) { channel_funding_outpoint_hu_conv.ptrs_to.add(this); };
				org.ldk.structs.ChannelMonitor data_hu_conv = null; if (data < 0 || data > 4096) { data_hu_conv = new org.ldk.structs.ChannelMonitor(null, data); }
				org.ldk.structs.MonitorUpdateId update_id_hu_conv = null; if (update_id < 0 || update_id > 4096) { update_id_hu_conv = new org.ldk.structs.MonitorUpdateId(null, update_id); }
				if (update_id_hu_conv != null) { update_id_hu_conv.ptrs_to.add(this); };
				ChannelMonitorUpdateStatus ret = arg.persist_new_channel(channel_funding_outpoint_hu_conv, data_hu_conv, update_id_hu_conv);
				// Keep `arg` strongly reachable until the callback has fully completed.
				Reference.reachabilityFence(arg);
			@Override public ChannelMonitorUpdateStatus update_persisted_channel(long channel_funding_outpoint, long update, long data, long update_id) {
				org.ldk.structs.OutPoint channel_funding_outpoint_hu_conv = null; if (channel_funding_outpoint < 0 || channel_funding_outpoint > 4096) { channel_funding_outpoint_hu_conv = new org.ldk.structs.OutPoint(null, channel_funding_outpoint); }
				if (channel_funding_outpoint_hu_conv != null) { channel_funding_outpoint_hu_conv.ptrs_to.add(this); };
				// `update` may be a non-pointer sentinel (None); in that case the wrapper stays null.
				org.ldk.structs.ChannelMonitorUpdate update_hu_conv = null; if (update < 0 || update > 4096) { update_hu_conv = new org.ldk.structs.ChannelMonitorUpdate(null, update); }
				if (update_hu_conv != null) { update_hu_conv.ptrs_to.add(this); };
				org.ldk.structs.ChannelMonitor data_hu_conv = null; if (data < 0 || data > 4096) { data_hu_conv = new org.ldk.structs.ChannelMonitor(null, data); }
				org.ldk.structs.MonitorUpdateId update_id_hu_conv = null; if (update_id < 0 || update_id > 4096) { update_id_hu_conv = new org.ldk.structs.MonitorUpdateId(null, update_id); }
				if (update_id_hu_conv != null) { update_id_hu_conv.ptrs_to.add(this); };
				ChannelMonitorUpdateStatus ret = arg.update_persisted_channel(channel_funding_outpoint_hu_conv, update_hu_conv, data_hu_conv, update_id_hu_conv);
				// Keep `arg` strongly reachable until the callback has fully completed.
				Reference.reachabilityFence(arg);
			@Override public void archive_persisted_channel(long channel_funding_outpoint) {
				org.ldk.structs.OutPoint channel_funding_outpoint_hu_conv = null; if (channel_funding_outpoint < 0 || channel_funding_outpoint > 4096) { channel_funding_outpoint_hu_conv = new org.ldk.structs.OutPoint(null, channel_funding_outpoint); }
				if (channel_funding_outpoint_hu_conv != null) { channel_funding_outpoint_hu_conv.ptrs_to.add(this); };
				arg.archive_persisted_channel(channel_funding_outpoint_hu_conv);
				// Keep `arg` strongly reachable until the callback has fully completed.
				Reference.reachabilityFence(arg);
		return impl_holder.held;
	/**
	 * Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
	 * called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
	 *
	 * The data can be stored any way you want, but the identifier provided by LDK is the
	 * channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
	 * and the stored channel data). Note that you **must** persist every new monitor to disk.
	 *
	 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
	 *
	 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
	 * and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	 *
	 * [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	 * [`Writeable::write`]: crate::util::ser::Writeable::write
	 */
	public ChannelMonitorUpdateStatus persist_new_channel(org.ldk.structs.OutPoint channel_funding_outpoint, org.ldk.structs.ChannelMonitor data, org.ldk.structs.MonitorUpdateId update_id) {
		ChannelMonitorUpdateStatus ret = bindings.Persist_persist_new_channel(this.ptr, channel_funding_outpoint.ptr, data.ptr, update_id.ptr);
		// Fences keep the wrappers (and thus the native objects behind their `.ptr`s)
		// alive until the native call above has returned.
		Reference.reachabilityFence(this);
		Reference.reachabilityFence(channel_funding_outpoint);
		Reference.reachabilityFence(data);
		Reference.reachabilityFence(update_id);
		// Tie the arguments' GC lifetimes to this object — NOTE(review): presumably
		// because the native side may keep referencing them; confirm with bindings.
		if (this != null) { this.ptrs_to.add(channel_funding_outpoint); };
		if (this != null) { this.ptrs_to.add(data); };
		if (this != null) { this.ptrs_to.add(update_id); };
	/**
	 * Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
	 * update.
	 *
	 * Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
	 * updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
	 * details.
	 *
	 * During blockchain synchronization operations, and in some rare cases, this may be called with
	 * no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
	 * Note that after the full [`ChannelMonitor`] is persisted any previous
	 * [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
	 * applied to the persisted [`ChannelMonitor`] as they were already applied.
	 *
	 * If an implementer chooses to persist the updates only, they need to make
	 * sure that all the updates are applied to the `ChannelMonitors` *before*
	 * the set of channel monitors is given to the `ChannelManager`
	 * deserialization routine. See [`ChannelMonitor::update_monitor`] for
	 * applying a monitor update to a monitor. If full `ChannelMonitors` are
	 * persisted, then there is no need to persist individual updates.
	 *
	 * Note that there could be a performance tradeoff between persisting complete
	 * channel monitors on every update vs. persisting only updates and applying
	 * them in batches. The size of each monitor grows `O(number of state updates)`
	 * whereas updates are small and `O(1)`.
	 *
	 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
	 *
	 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
	 * [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
	 * [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	 *
	 * [`Writeable::write`]: crate::util::ser::Writeable::write
	 *
	 * Note that update (or a relevant inner pointer) may be NULL or all-0s to represent None
	 */
	public ChannelMonitorUpdateStatus update_persisted_channel(org.ldk.structs.OutPoint channel_funding_outpoint, @Nullable org.ldk.structs.ChannelMonitorUpdate update, org.ldk.structs.ChannelMonitor data, org.ldk.structs.MonitorUpdateId update_id) {
		// A null `update` is passed through as 0 (None on the native side).
		ChannelMonitorUpdateStatus ret = bindings.Persist_update_persisted_channel(this.ptr, channel_funding_outpoint.ptr, update == null ? 0 : update.ptr, data.ptr, update_id.ptr);
		// Fences keep the wrappers (and thus the native objects behind their `.ptr`s)
		// alive until the native call above has returned.
		Reference.reachabilityFence(this);
		Reference.reachabilityFence(channel_funding_outpoint);
		Reference.reachabilityFence(update);
		Reference.reachabilityFence(data);
		Reference.reachabilityFence(update_id);
		// Tie the arguments' GC lifetimes to this object — NOTE(review): presumably
		// because the native side may keep referencing them; confirm with bindings.
		if (this != null) { this.ptrs_to.add(channel_funding_outpoint); };
		if (this != null) { this.ptrs_to.add(update); };
		if (this != null) { this.ptrs_to.add(data); };
		if (this != null) { this.ptrs_to.add(update_id); };
	/**
	 * Prevents the channel monitor from being loaded on startup.
	 *
	 * Archiving the data in a backup location (rather than deleting it fully) is useful for
	 * hedging against data loss in case of unexpected failure.
	 */
	public void archive_persisted_channel(org.ldk.structs.OutPoint channel_funding_outpoint) {
		bindings.Persist_archive_persisted_channel(this.ptr, channel_funding_outpoint.ptr);
		// Fences keep the wrappers alive until the native call above has returned.
		Reference.reachabilityFence(this);
		Reference.reachabilityFence(channel_funding_outpoint);
		// Tie the argument's GC lifetime to this object — NOTE(review): presumably
		// because the native side may keep referencing it; confirm with bindings.
		if (this != null) { this.ptrs_to.add(channel_funding_outpoint); };