7 namespace org { namespace ldk { namespace structs {
/** An implementation of Persist */
public interface PersistInterface {
	/**Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
	 * called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
	 *
	 * The data can be stored any way you want, but the identifier provided by LDK is the
	 * channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
	 * and the stored channel data). Note that you **must** persist every new monitor to disk.
	 *
	 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
	 *
	 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
	 * and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	 *
	 * [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	 * [`Writeable::write`]: crate::util::ser::Writeable::write
	 */
	ChannelMonitorUpdateStatus persist_new_channel(OutPoint channel_id, ChannelMonitor data, MonitorUpdateId update_id);
	/**Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
	 * update.
	 *
	 * Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
	 * updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
	 * details.
	 *
	 * During blockchain synchronization operations, and in some rare cases, this may be called with
	 * no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
	 * Note that after the full [`ChannelMonitor`] is persisted any previous
	 * [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
	 * applied to the persisted [`ChannelMonitor`] as they were already applied.
	 *
	 * If an implementer chooses to persist the updates only, they need to make
	 * sure that all the updates are applied to the `ChannelMonitors` *before
	 * the set of channel monitors is given to the `ChannelManager`
	 * deserialization routine. See [`ChannelMonitor::update_monitor`] for
	 * applying a monitor update to a monitor. If full `ChannelMonitors` are
	 * persisted, then there is no need to persist individual updates.
	 *
	 * Note that there could be a performance tradeoff between persisting complete
	 * channel monitors on every update vs. persisting only updates and applying
	 * them in batches. The size of each monitor grows `O(number of state updates)`
	 * whereas updates are small and `O(1)`.
	 *
	 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
	 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
	 *
	 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
	 * [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
	 * [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
	 *
	 * [`Writeable::write`]: crate::util::ser::Writeable::write
	 *
	 * Note that update (or a relevant inner pointer) may be NULL or all-0s to represent None
	 */
	ChannelMonitorUpdateStatus update_persisted_channel(OutPoint channel_id, ChannelMonitorUpdate update, ChannelMonitor data, MonitorUpdateId update_id);
}
/**
 * `Persist` defines behavior for persisting channel monitors: this could mean
 * writing once to disk, and/or uploading to one or more backup services.
 *
 * Persistence can happen in one of two ways - synchronously completing before the trait method
 * calls return or asynchronously in the background.
 *
 * # For those implementing synchronous persistence
 *
 * If persistence completes fully (including any relevant `fsync()` calls), the implementation
 * should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
 * should continue.
 *
 * If persistence fails for some reason, implementations should consider returning
 * [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
 * the background with [`ChainMonitor::list_pending_monitor_updates`] and
 * [`ChainMonitor::get_monitor`].
 *
 * Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
 * be marked as complete via [`ChainMonitor::channel_monitor_updated`].
 *
 * If at some point no further progress can be made towards persisting the pending updates, the
 * node should simply shut down.
 *
 * If the persistence has failed and cannot be retried further (e.g. because of an outage),
 * [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
 * an immediate panic and future operations in LDK generally failing.
 *
 * # For those implementing asynchronous persistence
 *
 * All calls should generally spawn a background task and immediately return
 * [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
 * [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
 * [`MonitorUpdateId`].
 *
 * Note that unlike the direct [`chain::Watch`] interface,
 * [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
 *
 * If at some point no further progress can be made towards persisting a pending update, the node
 * should simply shut down. Until then, the background task should either loop indefinitely, or
 * persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
 * and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
 * monitor updates may be marked completed).
 *
 * # Using remote watchtowers
 *
 * Watchtowers may be updated as a part of an implementation of this trait, utilizing the async
 * update process described above while the watchtower is being updated. The following methods are
 * provided for building transactions for a watchtower:
 * [`ChannelMonitor::initial_counterparty_commitment_tx`],
 * [`ChannelMonitor::counterparty_commitment_txs_from_update`],
 * [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
 * [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
 *
 * [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
 * [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
 */
public class Persist : CommonBase {
	// Reference to the concrete native-callback implementation (null when this wrapper
	// was constructed around an existing native pointer rather than a user impl).
	internal bindings.LDKPersist bindings_instance;
	// Index of this instance in the bindings' instance table, set by new_impl().
	internal long instance_idx;

	// Internal constructor: wraps an existing native pointer; _dummy disambiguates the overload.
	internal Persist(object _dummy, long ptr) : base(ptr) { bindings_instance = null; }
	// Releases the underlying native object when this wrapper is collected.
	// NOTE(review): the enclosing declaration for the free call was not visible in the
	// extracted source — presumably the finalizer, per the generated-bindings pattern; confirm.
	~Persist() {
		if (ptr != 0) { bindings.Persist_free(ptr); }
	}
135 private class LDKPersistHolder { internal Persist held; }
136 private class LDKPersistImpl : bindings.LDKPersist {
137 internal LDKPersistImpl(PersistInterface arg, LDKPersistHolder impl_holder) { this.arg = arg; this.impl_holder = impl_holder; }
138 private PersistInterface arg;
139 private LDKPersistHolder impl_holder;
140 public ChannelMonitorUpdateStatus persist_new_channel(long _channel_id, long _data, long _update_id) {
141 org.ldk.structs.OutPoint _channel_id_hu_conv = null; if (_channel_id < 0 || _channel_id > 4096) { _channel_id_hu_conv = new org.ldk.structs.OutPoint(null, _channel_id); }
142 if (_channel_id_hu_conv != null) { _channel_id_hu_conv.ptrs_to.AddLast(this); };
143 org.ldk.structs.ChannelMonitor _data_hu_conv = null; if (_data < 0 || _data > 4096) { _data_hu_conv = new org.ldk.structs.ChannelMonitor(null, _data); }
144 org.ldk.structs.MonitorUpdateId _update_id_hu_conv = null; if (_update_id < 0 || _update_id > 4096) { _update_id_hu_conv = new org.ldk.structs.MonitorUpdateId(null, _update_id); }
145 if (_update_id_hu_conv != null) { _update_id_hu_conv.ptrs_to.AddLast(this); };
146 ChannelMonitorUpdateStatus ret = arg.persist_new_channel(_channel_id_hu_conv, _data_hu_conv, _update_id_hu_conv);
150 public ChannelMonitorUpdateStatus update_persisted_channel(long _channel_id, long _update, long _data, long _update_id) {
151 org.ldk.structs.OutPoint _channel_id_hu_conv = null; if (_channel_id < 0 || _channel_id > 4096) { _channel_id_hu_conv = new org.ldk.structs.OutPoint(null, _channel_id); }
152 if (_channel_id_hu_conv != null) { _channel_id_hu_conv.ptrs_to.AddLast(this); };
153 org.ldk.structs.ChannelMonitorUpdate _update_hu_conv = null; if (_update < 0 || _update > 4096) { _update_hu_conv = new org.ldk.structs.ChannelMonitorUpdate(null, _update); }
154 if (_update_hu_conv != null) { _update_hu_conv.ptrs_to.AddLast(this); };
155 org.ldk.structs.ChannelMonitor _data_hu_conv = null; if (_data < 0 || _data > 4096) { _data_hu_conv = new org.ldk.structs.ChannelMonitor(null, _data); }
156 org.ldk.structs.MonitorUpdateId _update_id_hu_conv = null; if (_update_id < 0 || _update_id > 4096) { _update_id_hu_conv = new org.ldk.structs.MonitorUpdateId(null, _update_id); }
157 if (_update_id_hu_conv != null) { _update_id_hu_conv.ptrs_to.AddLast(this); };
158 ChannelMonitorUpdateStatus ret = arg.update_persisted_channel(_channel_id_hu_conv, _update_hu_conv, _data_hu_conv, _update_id_hu_conv);
164 /** Creates a new instance of Persist from a given implementation */
165 public static Persist new_impl(PersistInterface arg) {
166 LDKPersistHolder impl_holder = new LDKPersistHolder();
167 LDKPersistImpl impl = new LDKPersistImpl(arg, impl_holder);
168 long[] ptr_idx = bindings.LDKPersist_new(impl);
170 impl_holder.held = new Persist(null, ptr_idx[0]);
171 impl_holder.held.instance_idx = ptr_idx[1];
172 impl_holder.held.bindings_instance = impl;
173 return impl_holder.held;
177 * Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
178 * called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
180 * The data can be stored any way you want, but the identifier provided by LDK is the
181 * channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
182 * and the stored channel data). Note that you **must** persist every new monitor to disk.
184 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
185 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
187 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
188 * and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
190 * [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
191 * [`Writeable::write`]: crate::util::ser::Writeable::write
193 public ChannelMonitorUpdateStatus persist_new_channel(org.ldk.structs.OutPoint channel_id, org.ldk.structs.ChannelMonitor data, org.ldk.structs.MonitorUpdateId update_id) {
194 ChannelMonitorUpdateStatus ret = bindings.Persist_persist_new_channel(this.ptr, channel_id == null ? 0 : channel_id.ptr, data == null ? 0 : data.ptr, update_id == null ? 0 : update_id.ptr);
196 GC.KeepAlive(channel_id);
198 GC.KeepAlive(update_id);
199 if (this != null) { this.ptrs_to.AddLast(channel_id); };
200 if (this != null) { this.ptrs_to.AddLast(data); };
201 if (this != null) { this.ptrs_to.AddLast(update_id); };
206 * Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
209 * Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
210 * updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
213 * During blockchain synchronization operations, and in some rare cases, this may be called with
214 * no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
215 * Note that after the full [`ChannelMonitor`] is persisted any previous
216 * [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
217 * applied to the persisted [`ChannelMonitor`] as they were already applied.
219 * If an implementer chooses to persist the updates only, they need to make
220 * sure that all the updates are applied to the `ChannelMonitors` *before
221 * the set of channel monitors is given to the `ChannelManager`
222 * deserialization routine. See [`ChannelMonitor::update_monitor`] for
223 * applying a monitor update to a monitor. If full `ChannelMonitors` are
224 * persisted, then there is no need to persist individual updates.
226 * Note that there could be a performance tradeoff between persisting complete
227 * channel monitors on every update vs. persisting only updates and applying
228 * them in batches. The size of each monitor grows `O(number of state updates)`
229 * whereas updates are small and `O(1)`.
231 * The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
232 * if you return [`ChannelMonitorUpdateStatus::InProgress`].
234 * See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
235 * [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
236 * [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
238 * [`Writeable::write`]: crate::util::ser::Writeable::write
240 * Note that update (or a relevant inner pointer) may be NULL or all-0s to represent None
242 public ChannelMonitorUpdateStatus update_persisted_channel(org.ldk.structs.OutPoint channel_id, org.ldk.structs.ChannelMonitorUpdate update, org.ldk.structs.ChannelMonitor data, org.ldk.structs.MonitorUpdateId update_id) {
243 ChannelMonitorUpdateStatus ret = bindings.Persist_update_persisted_channel(this.ptr, channel_id == null ? 0 : channel_id.ptr, update == null ? 0 : update.ptr, data == null ? 0 : data.ptr, update_id == null ? 0 : update_id.ptr);
245 GC.KeepAlive(channel_id);
246 GC.KeepAlive(update);
248 GC.KeepAlive(update_id);
249 if (this != null) { this.ptrs_to.AddLast(channel_id); };
250 if (this != null) { this.ptrs_to.AddLast(update); };
251 if (this != null) { this.ptrs_to.AddLast(data); };
252 if (this != null) { this.ptrs_to.AddLast(update_id); };