* You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
* information and are actively monitoring the chain.
*
- * Pending Events or updated HTLCs which have not yet been read out by
- * get_and_clear_pending_monitor_events or get_and_clear_pending_events are serialized to disk and
- * reloaded at deserialize-time. Thus, you must ensure that, when handling events, all events
- * gotten are fully handled before re-serializing the new state.
- *
* Note that the deserializer is only implemented for (BlockHash, ChannelMonitor), which
* tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
 * the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
* panics if the given update is not the next update by update_id.
*/
public Result_NoneNoneZ update_monitor(org.ldk.structs.ChannelMonitorUpdate updates, org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
- long ret = bindings.ChannelMonitor_update_monitor(this.ptr, updates == null ? 0 : updates.ptr, broadcaster == null ? 0 : broadcaster.ptr, fee_estimator == null ? 0 : fee_estimator.ptr, logger == null ? 0 : logger.ptr);
+ long ret = bindings.ChannelMonitor_update_monitor(this.ptr, updates == null ? 0 : updates.ptr, broadcaster.ptr, fee_estimator.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(updates);
GC.KeepAlive(broadcaster);
* have been registered.
*/
/**
 * NOTE(review): this span is a unified-diff hunk, not compilable C#; the '-'/'+'
 * prefixes below are diff markers, preserved byte-for-byte.
 */
public void load_outputs_to_watch(org.ldk.structs.Filter filter) {
// The '+' replacement drops the null guard on `filter`: a null argument now
// throws NullReferenceException here instead of passing 0 to the native
// binding — TODO confirm this matches the bindings generator's intent
// (the same guard-removal pattern recurs throughout this diff).
- 	bindings.ChannelMonitor_load_outputs_to_watch(this.ptr, filter == null ? 0 : filter.ptr);
+ 	bindings.ChannelMonitor_load_outputs_to_watch(this.ptr, filter.ptr);
// Keep the managed wrappers alive across the native call so the GC cannot
// finalize them (and free the underlying native pointers) mid-call.
	GC.KeepAlive(this);
	GC.KeepAlive(filter);
// `this != null` is always true inside an instance method; kept as generated.
// Storing `filter` in ptrs_to ties its lifetime to this ChannelMonitor wrapper.
	if (this != null) { this.ptrs_to.AddLast(filter); };
}
/**
- * Gets the list of pending events which were generated by previous actions, clearing the list
- * in the process.
+ * Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
+ *
+ * For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
+ * events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
+ * within each channel. As the confirmation of a commitment transaction may be critical to the
+ * safety of funds, we recommend invoking this every 30 seconds, or lower if running in an
+ * environment with spotty connections, like on mobile.
*
- * This is called by the [`EventsProvider::process_pending_events`] implementation for
- * [`ChainMonitor`].
+ * An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
+ * order to handle these events.
*
- * [`EventsProvider::process_pending_events`]: crate::util::events::EventsProvider::process_pending_events
- * [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+ * [`SpendableOutputs`]: crate::events::Event::SpendableOutputs
+ * [`BumpTransaction`]: crate::events::Event::BumpTransaction
*/
// NOTE(review): diff hunk — the old pull-style accessor get_and_clear_pending_events()
// (returning an Event[] the caller must drain) is replaced by a push-style
// process_pending_events(handler) that dispatches events through the supplied
// EventHandler instead. Callers of the removed method must be migrated.
- 	public Event[] get_and_clear_pending_events() {
- 		long[] ret = bindings.ChannelMonitor_get_and_clear_pending_events(this.ptr);
+ 	public void process_pending_events(org.ldk.structs.EventHandler handler) {
// No null guard on `handler` — a null argument throws NullReferenceException
// here rather than passing 0 to the native side; TODO confirm intended.
+ 		bindings.ChannelMonitor_process_pending_events(this.ptr, handler.ptr);
// Keep wrappers alive across the native call so finalizers cannot run mid-call.
	GC.KeepAlive(this);
// Old per-element conversion loop (raw pointer -> org.ldk.structs.Event) is
// removed wholesale along with the array return.
- 		int ret_conv_7_len = ret.Length;
- 		Event[] ret_conv_7_arr = new Event[ret_conv_7_len];
- 		for (int h = 0; h < ret_conv_7_len; h++) {
- 			long ret_conv_7 = ret[h];
- 			org.ldk.structs.Event ret_conv_7_hu_conv = org.ldk.structs.Event.constr_from_ptr(ret_conv_7);
- 			if (ret_conv_7_hu_conv != null) { ret_conv_7_hu_conv.ptrs_to.AddLast(this); };
- 			ret_conv_7_arr[h] = ret_conv_7_hu_conv;
- 		}
- 		return ret_conv_7_arr;
+ 		GC.KeepAlive(handler);
// `this != null` is always true for an instance method; kept as generated.
// Holding `handler` in ptrs_to keeps it reachable while this monitor lives.
+ 		if (this != null) { this.ptrs_to.AddLast(handler); };
	}
/**
* [`ChannelMonitorUpdateStatus::PermanentFailure`]: super::ChannelMonitorUpdateStatus::PermanentFailure
*/
public byte[][] get_latest_holder_commitment_txn(org.ldk.structs.Logger logger) {
- byte[][] ret = bindings.ChannelMonitor_get_latest_holder_commitment_txn(this.ptr, logger == null ? 0 : logger.ptr);
+ byte[][] ret = bindings.ChannelMonitor_get_latest_holder_commitment_txn(this.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(logger);
if (this != null) { this.ptrs_to.AddLast(logger); };
* [`get_outputs_to_watch`]: #method.get_outputs_to_watch
*/
public TwoTuple_TxidCVec_C2Tuple_u32TxOutZZZ[] block_connected(byte[] header, TwoTuple_usizeTransactionZ[] txdata, int height, org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
- long[] ret = bindings.ChannelMonitor_block_connected(this.ptr, InternalUtils.check_arr_len(header, 80), txdata != null ? InternalUtils.mapArray(txdata, txdata_conv_28 => txdata_conv_28 != null ? txdata_conv_28.ptr : 0) : null, height, broadcaster == null ? 0 : broadcaster.ptr, fee_estimator == null ? 0 : fee_estimator.ptr, logger == null ? 0 : logger.ptr);
+ long[] ret = bindings.ChannelMonitor_block_connected(this.ptr, InternalUtils.check_arr_len(header, 80), txdata != null ? InternalUtils.mapArray(txdata, txdata_conv_28 => txdata_conv_28 != null ? txdata_conv_28.ptr : 0) : null, height, broadcaster.ptr, fee_estimator.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(header);
GC.KeepAlive(txdata);
* appropriately.
*/
public void block_disconnected(byte[] header, int height, org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
- bindings.ChannelMonitor_block_disconnected(this.ptr, InternalUtils.check_arr_len(header, 80), height, broadcaster == null ? 0 : broadcaster.ptr, fee_estimator == null ? 0 : fee_estimator.ptr, logger == null ? 0 : logger.ptr);
+ bindings.ChannelMonitor_block_disconnected(this.ptr, InternalUtils.check_arr_len(header, 80), height, broadcaster.ptr, fee_estimator.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(header);
GC.KeepAlive(height);
* [`block_connected`]: Self::block_connected
*/
public TwoTuple_TxidCVec_C2Tuple_u32TxOutZZZ[] transactions_confirmed(byte[] header, TwoTuple_usizeTransactionZ[] txdata, int height, org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
- long[] ret = bindings.ChannelMonitor_transactions_confirmed(this.ptr, InternalUtils.check_arr_len(header, 80), txdata != null ? InternalUtils.mapArray(txdata, txdata_conv_28 => txdata_conv_28 != null ? txdata_conv_28.ptr : 0) : null, height, broadcaster == null ? 0 : broadcaster.ptr, fee_estimator == null ? 0 : fee_estimator.ptr, logger == null ? 0 : logger.ptr);
+ long[] ret = bindings.ChannelMonitor_transactions_confirmed(this.ptr, InternalUtils.check_arr_len(header, 80), txdata != null ? InternalUtils.mapArray(txdata, txdata_conv_28 => txdata_conv_28 != null ? txdata_conv_28.ptr : 0) : null, height, broadcaster.ptr, fee_estimator.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(header);
GC.KeepAlive(txdata);
* [`block_disconnected`]: Self::block_disconnected
*/
public void transaction_unconfirmed(byte[] txid, org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
- bindings.ChannelMonitor_transaction_unconfirmed(this.ptr, InternalUtils.check_arr_len(txid, 32), broadcaster == null ? 0 : broadcaster.ptr, fee_estimator == null ? 0 : fee_estimator.ptr, logger == null ? 0 : logger.ptr);
+ bindings.ChannelMonitor_transaction_unconfirmed(this.ptr, InternalUtils.check_arr_len(txid, 32), broadcaster.ptr, fee_estimator.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(txid);
GC.KeepAlive(broadcaster);
* [`block_connected`]: Self::block_connected
*/
public TwoTuple_TxidCVec_C2Tuple_u32TxOutZZZ[] best_block_updated(byte[] header, int height, org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
- long[] ret = bindings.ChannelMonitor_best_block_updated(this.ptr, InternalUtils.check_arr_len(header, 80), height, broadcaster == null ? 0 : broadcaster.ptr, fee_estimator == null ? 0 : fee_estimator.ptr, logger == null ? 0 : logger.ptr);
+ long[] ret = bindings.ChannelMonitor_best_block_updated(this.ptr, InternalUtils.check_arr_len(header, 80), height, broadcaster.ptr, fee_estimator.ptr, logger.ptr);
GC.KeepAlive(this);
GC.KeepAlive(header);
GC.KeepAlive(height);
/**
* Returns the set of txids that should be monitored for re-organization out of the chain.
*/
// NOTE(review): diff hunk — the return element type changes from
// TwoTuple_TxidBlockHashZ to TwoTuple_TxidCOption_BlockHashZZ, i.e. the
// confirmation block hash becomes optional per txid. This is a breaking
// signature change for callers of the old method. The `_25`/`_34` suffixes
// and the `z`/`i` loop variables are bindings-generator artifacts only.
- 	public TwoTuple_TxidBlockHashZ[] get_relevant_txids() {
+ 	public TwoTuple_TxidCOption_BlockHashZZ[] get_relevant_txids() {
// Native call returns an array of raw pointers, one per tuple.
		long[] ret = bindings.ChannelMonitor_get_relevant_txids(this.ptr);
		GC.KeepAlive(this);
- 		int ret_conv_25_len = ret.Length;
- 		TwoTuple_TxidBlockHashZ[] ret_conv_25_arr = new TwoTuple_TxidBlockHashZ[ret_conv_25_len];
- 		for (int z = 0; z < ret_conv_25_len; z++) {
- 			long ret_conv_25 = ret[z];
- 			TwoTuple_TxidBlockHashZ ret_conv_25_hu_conv = new TwoTuple_TxidBlockHashZ(null, ret_conv_25);
- 			if (ret_conv_25_hu_conv != null) { ret_conv_25_hu_conv.ptrs_to.AddLast(this); };
- 			ret_conv_25_arr[z] = ret_conv_25_hu_conv;
+ 		int ret_conv_34_len = ret.Length;
+ 		TwoTuple_TxidCOption_BlockHashZZ[] ret_conv_34_arr = new TwoTuple_TxidCOption_BlockHashZZ[ret_conv_34_len];
+ 		for (int i = 0; i < ret_conv_34_len; i++) {
+ 			long ret_conv_34 = ret[i];
// Wrap each raw pointer; AddLast(this) keeps the parent monitor alive as long
// as the tuple wrapper is reachable.
+ 			TwoTuple_TxidCOption_BlockHashZZ ret_conv_34_hu_conv = new TwoTuple_TxidCOption_BlockHashZZ(null, ret_conv_34);
+ 			if (ret_conv_34_hu_conv != null) { ret_conv_34_hu_conv.ptrs_to.AddLast(this); };
+ 			ret_conv_34_arr[i] = ret_conv_34_hu_conv;
		}
- 		return ret_conv_25_arr;
+ 		return ret_conv_34_arr;
	}
/**
return ret_hu_conv;
}
// NOTE(review): all-added diff hunk introducing a new periodic-maintenance
// entry point; '+' prefixes are diff markers, preserved byte-for-byte.
// As elsewhere in this diff, broadcaster/fee_estimator/logger pointers are
// dereferenced without null guards — null arguments throw
// NullReferenceException; TODO confirm intended.
+ 	/**
+ 	 * Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
+ 	 * crucial in preventing certain classes of pinning attacks, detecting substantial mempool
+ 	 * feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
+ 	 * invoking this every 30 seconds, or lower if running in an environment with spotty
+ 	 * connections, like on mobile.
+ 	 */
+ 	public void rebroadcast_pending_claims(org.ldk.structs.BroadcasterInterface broadcaster, org.ldk.structs.FeeEstimator fee_estimator, org.ldk.structs.Logger logger) {
+ 		bindings.ChannelMonitor_rebroadcast_pending_claims(this.ptr, broadcaster.ptr, fee_estimator.ptr, logger.ptr);
// Keep every wrapper alive across the native call so finalizers cannot free
// the underlying native objects mid-call.
+ 		GC.KeepAlive(this);
+ 		GC.KeepAlive(broadcaster);
+ 		GC.KeepAlive(fee_estimator);
+ 		GC.KeepAlive(logger);
// `this != null` is always true in an instance method; kept as generated.
// ptrs_to entries pin the arguments for this monitor's lifetime.
+ 		if (this != null) { this.ptrs_to.AddLast(broadcaster); };
+ 		if (this != null) { this.ptrs_to.AddLast(fee_estimator); };
+ 		if (this != null) { this.ptrs_to.AddLast(logger); };
+ 	}
+
/**
* Gets the balances in this channel which are either claimable by us if we were to
* force-close the channel now or which are claimable on-chain (possibly awaiting