7 namespace org { namespace ldk { namespace structs {
11 /** An implementation of Persist */
12 public interface PersistInterface { // user-implemented persistence backend; wrap into a native-callable Persist via Persist.new_impl
13 /**Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
14 * called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
16 * The data can be stored any way you want, but the identifier provided by LDK is the
17 * channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
18 * and the stored channel data). Note that you **must** persist every new monitor to disk.
20 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
21 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
23 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
24 * and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
26 * [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
27 * [`Writeable::write`]: crate::util::ser::Writeable::write
29 ChannelMonitorUpdateStatus persist_new_channel(OutPoint channel_funding_outpoint, ChannelMonitor data, MonitorUpdateId update_id); // initial full-monitor persist for a new channel
30 /**Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
33 * Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
34 * updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
37 * During blockchain synchronization operations, and in some rare cases, this may be called with
38 * no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
39 * Note that after the full [`ChannelMonitor`] is persisted any previous
40 * [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
41 * applied to the persisted [`ChannelMonitor`] as they were already applied.
43 * If an implementer chooses to persist the updates only, they need to make
44 * sure that all the updates are applied to the `ChannelMonitors` *before
45 * the set of channel monitors is given to the `ChannelManager`
46 * deserialization routine. See [`ChannelMonitor::update_monitor`] for
47 * applying a monitor update to a monitor. If full `ChannelMonitors` are
48 * persisted, then there is no need to persist individual updates.
50 * Note that there could be a performance tradeoff between persisting complete
51 * channel monitors on every update vs. persisting only updates and applying
52 * them in batches. The size of each monitor grows `O(number of state updates)`
53 * whereas updates are small and `O(1)`.
55 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
56 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
58 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
59 * [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
60 * [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
62 * [`Writeable::write`]: crate::util::ser::Writeable::write
64 * Note that update (or a relevant inner pointer) may be NULL or all-0s to represent None
66 ChannelMonitorUpdateStatus update_persisted_channel(OutPoint channel_funding_outpoint, ChannelMonitorUpdate update, ChannelMonitor data, MonitorUpdateId update_id); // incremental persist; `update` may be null (see note above)
67 /**Prevents the channel monitor from being loaded on startup.
69 * Archiving the data in a backup location (rather than deleting it fully) is useful for
70 * hedging against data loss in case of unexpected failure.
72 void archive_persisted_channel(OutPoint channel_funding_outpoint); // move (not delete) the monitor so it is no longer loaded at startup
76 * `Persist` defines behavior for persisting channel monitors: this could mean
77 * writing once to disk, and/or uploading to one or more backup services.
79 * Persistence can happen in one of two ways - synchronously completing before the trait method
80 * calls return or asynchronously in the background.
82 * # For those implementing synchronous persistence
84 * If persistence completes fully (including any relevant `fsync()` calls), the implementation
85 * should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
88 * If persistence fails for some reason, implementations should consider returning
89 * [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
90 * the background with [`ChainMonitor::list_pending_monitor_updates`] and
91 * [`ChainMonitor::get_monitor`].
93 * Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
94 * be marked as complete via [`ChainMonitor::channel_monitor_updated`].
96 * If at some point no further progress can be made towards persisting the pending updates, the
97 * node should simply shut down.
99 * If the persistence has failed and cannot be retried further (e.g. because of an outage),
100 * [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
101 * an immediate panic and future operations in LDK generally failing.
103 * # For those implementing asynchronous persistence
105 * All calls should generally spawn a background task and immediately return
106 * [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
107 * [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
108 * [`MonitorUpdateId`].
110 * Note that unlike the direct [`chain::Watch`] interface,
111 * [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
113 * If at some point no further progress can be made towards persisting a pending update, the node
114 * should simply shut down. Until then, the background task should either loop indefinitely, or
115 * persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
116 * and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
117 * monitor updates may be marked completed).
119 * # Using remote watchtowers
121 * Watchtowers may be updated as a part of an implementation of this trait, utilizing the async
122 * update process described above while the watchtower is being updated. The following methods are
123 * provided for building transactions for a watchtower:
124 * [`ChannelMonitor::initial_counterparty_commitment_tx`],
125 * [`ChannelMonitor::counterparty_commitment_txs_from_update`],
126 * [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
127 * [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
129 * [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
130 * [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
132 public class Persist : CommonBase { // managed handle over the native LDKPersist trait object
133 internal bindings.LDKPersist bindings_instance; // non-null only when constructed via new_impl (managed implementation)
134 internal long instance_idx; // index returned by bindings.LDKPersist_new; identifies this instance to the bindings layer
// Wraps an existing native pointer; bindings_instance stays null because no managed
// implementation backs this handle. `_dummy` only disambiguates the ctor overload.
136 internal Persist(object _dummy, long ptr) : base(ptr) { bindings_instance = null; }
// NOTE(review): this line appears to be the body of a finalizer/Dispose whose header is not
// visible in this view — frees the native object only when a real pointer is held.
138 if (ptr != 0) { bindings.Persist_free(ptr); }
141 private class LDKPersistHolder { internal Persist held; } // indirection so the callback impl can reference the Persist created after it
142 private class LDKPersistImpl : bindings.LDKPersist { // adapts a PersistInterface to the raw-pointer callback ABI
143 internal LDKPersistImpl(PersistInterface arg, LDKPersistHolder impl_holder) { this.arg = arg; this.impl_holder = impl_holder; }
144 private PersistInterface arg; // the user's implementation; all callbacks delegate to it
145 private LDKPersistHolder impl_holder; // back-reference to the owning Persist (filled in by new_impl)
// Native -> managed trampoline: wraps the raw pointer arguments in managed handles and
// delegates to the user's PersistInterface.persist_new_channel.
146 public ChannelMonitorUpdateStatus persist_new_channel(long _channel_funding_outpoint, long _data, long _update_id) {
// Values in [0, 4096] are treated as sentinels (not valid pointers) and left as null wrappers;
// anything else is wrapped. The null first ctor arg is the `_dummy` overload marker.
147 org.ldk.structs.OutPoint _channel_funding_outpoint_hu_conv = null; if (_channel_funding_outpoint < 0 || _channel_funding_outpoint > 4096) { _channel_funding_outpoint_hu_conv = new org.ldk.structs.OutPoint(null, _channel_funding_outpoint); }
// Tie this callback object's lifetime to the wrapper — presumably so the impl is not
// collected while the argument is alive; confirm in CommonBase.
148 if (_channel_funding_outpoint_hu_conv != null) { _channel_funding_outpoint_hu_conv.ptrs_to.AddLast(this); };
149 org.ldk.structs.ChannelMonitor _data_hu_conv = null; if (_data < 0 || _data > 4096) { _data_hu_conv = new org.ldk.structs.ChannelMonitor(null, _data); }
150 org.ldk.structs.MonitorUpdateId _update_id_hu_conv = null; if (_update_id < 0 || _update_id > 4096) { _update_id_hu_conv = new org.ldk.structs.MonitorUpdateId(null, _update_id); }
151 if (_update_id_hu_conv != null) { _update_id_hu_conv.ptrs_to.AddLast(this); };
// Delegate to the user implementation; the returned status is handed back to native code.
152 ChannelMonitorUpdateStatus ret = arg.persist_new_channel(_channel_funding_outpoint_hu_conv, _data_hu_conv, _update_id_hu_conv);
// Native -> managed trampoline: wraps the raw pointer arguments and delegates to the
// user's PersistInterface.update_persisted_channel. `_update` may be a sentinel/None.
156 public ChannelMonitorUpdateStatus update_persisted_channel(long _channel_funding_outpoint, long _update, long _data, long _update_id) {
// Values in [0, 4096] are sentinels (not pointers); only real pointers get wrapped.
157 org.ldk.structs.OutPoint _channel_funding_outpoint_hu_conv = null; if (_channel_funding_outpoint < 0 || _channel_funding_outpoint > 4096) { _channel_funding_outpoint_hu_conv = new org.ldk.structs.OutPoint(null, _channel_funding_outpoint); }
158 if (_channel_funding_outpoint_hu_conv != null) { _channel_funding_outpoint_hu_conv.ptrs_to.AddLast(this); };
// A null wrapper here represents the documented "no ChannelMonitorUpdate" (None) case.
159 org.ldk.structs.ChannelMonitorUpdate _update_hu_conv = null; if (_update < 0 || _update > 4096) { _update_hu_conv = new org.ldk.structs.ChannelMonitorUpdate(null, _update); }
160 if (_update_hu_conv != null) { _update_hu_conv.ptrs_to.AddLast(this); };
161 org.ldk.structs.ChannelMonitor _data_hu_conv = null; if (_data < 0 || _data > 4096) { _data_hu_conv = new org.ldk.structs.ChannelMonitor(null, _data); }
162 org.ldk.structs.MonitorUpdateId _update_id_hu_conv = null; if (_update_id < 0 || _update_id > 4096) { _update_id_hu_conv = new org.ldk.structs.MonitorUpdateId(null, _update_id); }
163 if (_update_id_hu_conv != null) { _update_id_hu_conv.ptrs_to.AddLast(this); };
// Delegate to the user implementation; the returned status is handed back to native code.
164 ChannelMonitorUpdateStatus ret = arg.update_persisted_channel(_channel_funding_outpoint_hu_conv, _update_hu_conv, _data_hu_conv, _update_id_hu_conv);
// Native -> managed trampoline: wraps the outpoint pointer and delegates to the
// user's PersistInterface.archive_persisted_channel. No return value to marshal.
168 public void archive_persisted_channel(long _channel_funding_outpoint) {
169 org.ldk.structs.OutPoint _channel_funding_outpoint_hu_conv = null; if (_channel_funding_outpoint < 0 || _channel_funding_outpoint > 4096) { _channel_funding_outpoint_hu_conv = new org.ldk.structs.OutPoint(null, _channel_funding_outpoint); }
170 if (_channel_funding_outpoint_hu_conv != null) { _channel_funding_outpoint_hu_conv.ptrs_to.AddLast(this); };
171 arg.archive_persisted_channel(_channel_funding_outpoint_hu_conv);
176 /** Creates a new instance of Persist from a given implementation */
177 public static Persist new_impl(PersistInterface arg) {
178 LDKPersistHolder impl_holder = new LDKPersistHolder();
179 LDKPersistImpl impl = new LDKPersistImpl(arg, impl_holder);
// Register the callback adapter with the bindings layer.
// ptr_idx[0] is the native trait-object pointer, ptr_idx[1] the bindings instance index.
180 long[] ptr_idx = bindings.LDKPersist_new(impl);
182 impl_holder.held = new Persist(null, ptr_idx[0]);
183 impl_holder.held.instance_idx = ptr_idx[1];
// Keep the adapter reachable from the returned handle so it is not collected
// while native code can still invoke its callbacks.
184 impl_holder.held.bindings_instance = impl;
185 return impl_holder.held;
189 * Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
190 * called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
192 * The data can be stored any way you want, but the identifier provided by LDK is the
193 * channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
194 * and the stored channel data). Note that you **must** persist every new monitor to disk.
196 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
197 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
199 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
200 * and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
202 * [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
203 * [`Writeable::write`]: crate::util::ser::Writeable::write
205 public ChannelMonitorUpdateStatus persist_new_channel(org.ldk.structs.OutPoint channel_funding_outpoint, org.ldk.structs.ChannelMonitor data, org.ldk.structs.MonitorUpdateId update_id) {
// Managed -> native call: pass the raw pointers of the argument wrappers through the bindings.
206 ChannelMonitorUpdateStatus ret = bindings.Persist_persist_new_channel(this.ptr, channel_funding_outpoint.ptr, data.ptr, update_id.ptr);
// Prevent the GC from collecting the wrappers (and freeing their native memory via
// finalizers) before the native call above has fully completed.
208 GC.KeepAlive(channel_funding_outpoint);
210 GC.KeepAlive(update_id);
// `this != null` is always true in a C# instance method — defensive generated-code pattern.
// The AddLast calls pin the argument wrappers to this object's lifetime.
211 if (this != null) { this.ptrs_to.AddLast(channel_funding_outpoint); };
212 if (this != null) { this.ptrs_to.AddLast(data); };
213 if (this != null) { this.ptrs_to.AddLast(update_id); };
218 * Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
221 * Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
222 * updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
225 * During blockchain synchronization operations, and in some rare cases, this may be called with
226 * no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
227 * Note that after the full [`ChannelMonitor`] is persisted any previous
228 * [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
229 * applied to the persisted [`ChannelMonitor`] as they were already applied.
231 * If an implementer chooses to persist the updates only, they need to make
232 * sure that all the updates are applied to the `ChannelMonitors` *before
233 * the set of channel monitors is given to the `ChannelManager`
234 * deserialization routine. See [`ChannelMonitor::update_monitor`] for
235 * applying a monitor update to a monitor. If full `ChannelMonitors` are
236 * persisted, then there is no need to persist individual updates.
238 * Note that there could be a performance tradeoff between persisting complete
239 * channel monitors on every update vs. persisting only updates and applying
240 * them in batches. The size of each monitor grows `O(number of state updates)`
241 * whereas updates are small and `O(1)`.
243 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
244 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
246 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
247 * [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
248 * [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
250 * [`Writeable::write`]: crate::util::ser::Writeable::write
252 * Note that update (or a relevant inner pointer) may be NULL or all-0s to represent None
254 public ChannelMonitorUpdateStatus update_persisted_channel(org.ldk.structs.OutPoint channel_funding_outpoint, org.ldk.structs.ChannelMonitorUpdate update, org.ldk.structs.ChannelMonitor data, org.ldk.structs.MonitorUpdateId update_id) {
// Managed -> native call. `update` is nullable (None on the Rust side), hence the 0-pointer fallback.
255 ChannelMonitorUpdateStatus ret = bindings.Persist_update_persisted_channel(this.ptr, channel_funding_outpoint.ptr, update == null ? 0 : update.ptr, data.ptr, update_id.ptr);
// Prevent premature GC of the wrappers while the native call may still use their pointers.
257 GC.KeepAlive(channel_funding_outpoint);
258 GC.KeepAlive(update);
260 GC.KeepAlive(update_id);
// `this != null` is always true in a C# instance method — defensive generated-code pattern.
// The AddLast calls pin the argument wrappers to this object's lifetime.
261 if (this != null) { this.ptrs_to.AddLast(channel_funding_outpoint); };
262 if (this != null) { this.ptrs_to.AddLast(update); };
263 if (this != null) { this.ptrs_to.AddLast(data); };
264 if (this != null) { this.ptrs_to.AddLast(update_id); };
269 * Prevents the channel monitor from being loaded on startup.
271 * Archiving the data in a backup location (rather than deleting it fully) is useful for
272 * hedging against data loss in case of unexpected failure.
274 public void archive_persisted_channel(org.ldk.structs.OutPoint channel_funding_outpoint) {
// Managed -> native call; no return value to marshal.
275 bindings.Persist_archive_persisted_channel(this.ptr, channel_funding_outpoint.ptr);
// Prevent premature GC of the wrapper while the native call may still use its pointer.
277 GC.KeepAlive(channel_funding_outpoint);
// `this != null` is always true in a C# instance method — defensive generated-code pattern.
278 if (this != null) { this.ptrs_to.AddLast(channel_funding_outpoint); };