X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=ts%2Fstructs%2FProbabilisticScoringParameters.mts;fp=ts%2Fstructs%2FProbabilisticScoringParameters.mts;h=c46a7a0bcdebf9e324b8c8661eca77289737b6db;hb=c235b1252356e371b27ba8735595c59aaf69c2d1;hp=a5f29bde93c73d93d8a1dc21bf57338c14c88760;hpb=5e9de82b3a7712a41189756d9d16d946142b2ac5;p=ldk-java

diff --git a/ts/structs/ProbabilisticScoringParameters.mts b/ts/structs/ProbabilisticScoringParameters.mts
index a5f29bde..c46a7a0b 100644
--- a/ts/structs/ProbabilisticScoringParameters.mts
+++ b/ts/structs/ProbabilisticScoringParameters.mts
@@ -2,7 +2,7 @@ import { TxOut } from '../structs/TxOut.mjs';
 import { BigEndianScalar } from '../structs/BigEndianScalar.mjs';
 import { AccessError } from '../enums/AccessError.mjs';
 import { COption_NoneZ } from '../enums/COption_NoneZ.mjs';
-import { ChannelMonitorUpdateErr } from '../enums/ChannelMonitorUpdateErr.mjs';
+import { ChannelMonitorUpdateStatus } from '../enums/ChannelMonitorUpdateStatus.mjs';
 import { ConfirmationTarget } from '../enums/ConfirmationTarget.mjs';
 import { CreationError } from '../enums/CreationError.mjs';
 import { Currency } from '../enums/Currency.mjs';
@@ -109,7 +109,6 @@ import { GossipTimestampFilter } from '../structs/GossipTimestampFilter.mjs';
 import { MessageSendEvent } from '../structs/MessageSendEvent.mjs';
 import { Result_TxOutAccessErrorZ } from '../structs/Result_TxOutAccessErrorZ.mjs';
 import { TwoTuple_usizeTransactionZ } from '../structs/TwoTuple_usizeTransactionZ.mjs';
-import { Result_NoneChannelMonitorUpdateErrZ } from '../structs/Result_NoneChannelMonitorUpdateErrZ.mjs';
 import { HTLCUpdate } from '../structs/HTLCUpdate.mjs';
 import { MonitorEvent } from '../structs/MonitorEvent.mjs';
 import { ThreeTuple_OutPointCVec_MonitorEventZPublicKeyZ } from '../structs/ThreeTuple_OutPointCVec_MonitorEventZPublicKeyZ.mjs';
@@ -165,6 +164,7 @@ import { Result_SignatureNoneZ } from '../structs/Result_SignatureNoneZ.mjs';
 import { TwoTuple_SignatureSignatureZ } from '../structs/TwoTuple_SignatureSignatureZ.mjs';
 import { Result_C2Tuple_SignatureSignatureZNoneZ } from '../structs/Result_C2Tuple_SignatureSignatureZNoneZ.mjs';
 import { Result_SecretKeyNoneZ } from '../structs/Result_SecretKeyNoneZ.mjs';
+import { Result_PublicKeyNoneZ } from '../structs/Result_PublicKeyNoneZ.mjs';
 import { Option_ScalarZ } from '../structs/Option_ScalarZ.mjs';
 import { Result_SharedSecretNoneZ } from '../structs/Result_SharedSecretNoneZ.mjs';
 import { ClosingTransaction } from '../structs/ClosingTransaction.mjs';
@@ -252,6 +252,9 @@ import { Balance } from '../structs/Balance.mjs';
 import { TwoTuple_BlockHashChannelMonitorZ } from '../structs/TwoTuple_BlockHashChannelMonitorZ.mjs';
 import { Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ } from '../structs/Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.mjs';
 import { TwoTuple_PublicKeyTypeZ } from '../structs/TwoTuple_PublicKeyTypeZ.mjs';
+import { CustomOnionMessageContents, CustomOnionMessageContentsInterface } from '../structs/CustomOnionMessageContents.mjs';
+import { Option_CustomOnionMessageContentsZ } from '../structs/Option_CustomOnionMessageContentsZ.mjs';
+import { Result_COption_CustomOnionMessageContentsZDecodeErrorZ } from '../structs/Result_COption_CustomOnionMessageContentsZDecodeErrorZ.mjs';
 import { Option_NetAddressZ } from '../structs/Option_NetAddressZ.mjs';
 import { PeerHandleError } from '../structs/PeerHandleError.mjs';
 import { Result_CVec_u8ZPeerHandleErrorZ } from '../structs/Result_CVec_u8ZPeerHandleErrorZ.mjs';
@@ -352,6 +355,7 @@ import { OnionMessageHandler, OnionMessageHandlerInterface } from '../structs/On
 import { CustomMessageReader, CustomMessageReaderInterface } from '../structs/CustomMessageReader.mjs';
 import { CustomMessageHandler, CustomMessageHandlerInterface } from '../structs/CustomMessageHandler.mjs';
 import { IgnoringMessageHandler } from '../structs/IgnoringMessageHandler.mjs';
+import { CustomOnionMessageHandler, CustomOnionMessageHandlerInterface } from '../structs/CustomOnionMessageHandler.mjs';
 import { ErroringMessageHandler } from '../structs/ErroringMessageHandler.mjs';
 import { MessageHandler } from '../structs/MessageHandler.mjs';
 import { SocketDescriptor, SocketDescriptorInterface } from '../structs/SocketDescriptor.mjs';
@@ -392,7 +396,7 @@ import * as bindings from '../bindings.mjs'
  */
 export class ProbabilisticScoringParameters extends CommonBase {
 	/* @internal */
-	public constructor(_dummy: object, ptr: bigint) {
+	public constructor(_dummy: null, ptr: bigint) {
 		super(ptr, bindings.ProbabilisticScoringParameters_free);
 	}
 
@@ -454,7 +458,8 @@ export class ProbabilisticScoringParameters extends CommonBase {
 
 	/**
 	 * A multiplier used in conjunction with the negative `log10` of the channel's success
-	 * probability for a payment to determine the liquidity penalty.
+	 * probability for a payment, as determined by our latest estimates of the channel's
+	 * liquidity, to determine the liquidity penalty.
 	 * 
 	 * The penalty is based in part on the knowledge learned from prior successful and unsuccessful
 	 * payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The
@@ -463,7 +468,9 @@ export class ProbabilisticScoringParameters extends CommonBase {
 	 * uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
 	 * result in a `u64::max_value` penalty, however.
 	 * 
-	 * Default value: 40,000 msat
+	 * `-log10(success_probability) * liquidity_penalty_multiplier_msat`
+	 * 
+	 * Default value: 30,000 msat
 	 * 
 	 * [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
 	 */
@@ -474,7 +481,8 @@ export class ProbabilisticScoringParameters extends CommonBase {
 
 	/**
 	 * A multiplier used in conjunction with the negative `log10` of the channel's success
-	 * probability for a payment to determine the liquidity penalty.
+	 * probability for a payment, as determined by our latest estimates of the channel's
+	 * liquidity, to determine the liquidity penalty.
 	 * 
 	 * The penalty is based in part on the knowledge learned from prior successful and unsuccessful
 	 * payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The
@@ -483,7 +491,9 @@ export class ProbabilisticScoringParameters extends CommonBase {
 	 * uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
 	 * result in a `u64::max_value` penalty, however.
 	 * 
-	 * Default value: 40,000 msat
+	 * `-log10(success_probability) * liquidity_penalty_multiplier_msat`
+	 * 
+	 * Default value: 30,000 msat
 	 * 
 	 * [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
 	 */
@@ -492,14 +502,20 @@ export class ProbabilisticScoringParameters extends CommonBase {
 	}
 
 	/**
-	 * The time required to elapse before any knowledge learned about channel liquidity balances is
-	 * cut in half.
+	 * Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
+	 * the distance from the bounds to \"zero\" is cut in half. In other words, the lower-bound on
+	 * the available liquidity is halved and the upper-bound moves half-way to the channel's total
+	 * capacity.
 	 * 
-	 * The bounds are defined in terms of offsets and are initially zero. Increasing the offsets
-	 * gives tighter bounds on the channel liquidity balance. Thus, halving the offsets decreases
-	 * the certainty of the channel liquidity balance.
+	 * Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
+	 * the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
+	 * struct documentation for more info on the way the liquidity bounds are used.
 	 * 
-	 * Default value: 1 hour
+	 * For example, if the channel's capacity is 1 million sats, and the current upper and lower
+	 * liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
+	 * and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
+	 * 
+	 * Default value: 6 hours
 	 * 
 	 * # Note
 	 * 
@@ -512,14 +528,20 @@ export class ProbabilisticScoringParameters extends CommonBase {
 	}
 
 	/**
-	 * The time required to elapse before any knowledge learned about channel liquidity balances is
-	 * cut in half.
+	 * Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
+	 * the distance from the bounds to \"zero\" is cut in half. In other words, the lower-bound on
+	 * the available liquidity is halved and the upper-bound moves half-way to the channel's total
+	 * capacity.
+	 * 
+	 * Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
+	 * the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
+	 * struct documentation for more info on the way the liquidity bounds are used.
 	 * 
-	 * The bounds are defined in terms of offsets and are initially zero. Increasing the offsets
-	 * gives tighter bounds on the channel liquidity balance. Thus, halving the offsets decreases
-	 * the certainty of the channel liquidity balance.
+	 * For example, if the channel's capacity is 1 million sats, and the current upper and lower
+	 * liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
+	 * and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
 	 * 
-	 * Default value: 1 hour
+	 * Default value: 6 hours
 	 * 
 	 * # Note
 	 * 
@@ -532,7 +554,8 @@ export class ProbabilisticScoringParameters extends CommonBase {
 
 	/**
 	 * A multiplier used in conjunction with a payment amount and the negative `log10` of the
-	 * channel's success probability for the payment to determine the amount penalty.
+	 * channel's success probability for the payment, as determined by our latest estimates of the
+	 * channel's liquidity, to determine the amount penalty.
 	 * 
 	 * The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
 	 * fees plus penalty) for large payments. The penalty is computed as the product of this
@@ -547,7 +570,7 @@ export class ProbabilisticScoringParameters extends CommonBase {
 	 * probabilities, the multiplier will have a decreasing effect as the negative `log10` will
 	 * fall below `1`.
 	 * 
-	 * Default value: 256 msat
+	 * Default value: 192 msat
 	 */
 	public get_liquidity_penalty_amount_multiplier_msat(): bigint {
 		const ret: bigint = bindings.ProbabilisticScoringParameters_get_liquidity_penalty_amount_multiplier_msat(this.ptr);
 		return ret;
 	}
@@ -556,7 +579,8 @@ export class ProbabilisticScoringParameters extends CommonBase {
 
 	/**
 	 * A multiplier used in conjunction with a payment amount and the negative `log10` of the
-	 * channel's success probability for the payment to determine the amount penalty.
+	 * channel's success probability for the payment, as determined by our latest estimates of the
+	 * channel's liquidity, to determine the amount penalty.
 	 * 
 	 * The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
 	 * fees plus penalty) for large payments. The penalty is computed as the product of this
@@ -571,12 +595,131 @@ export class ProbabilisticScoringParameters extends CommonBase {
 	 * probabilities, the multiplier will have a decreasing effect as the negative `log10` will
 	 * fall below `1`.
 	 * 
-	 * Default value: 256 msat
+	 * Default value: 192 msat
 	 */
 	public set_liquidity_penalty_amount_multiplier_msat(val: bigint): void {
 		bindings.ProbabilisticScoringParameters_set_liquidity_penalty_amount_multiplier_msat(this.ptr, val);
 	}
 
+	/**
+	 * A multiplier used in conjunction with the negative `log10` of the channel's success
+	 * probability for the payment, as determined based on the history of our estimates of the
+	 * channel's available liquidity, to determine a penalty.
+	 * 
+	 * This penalty is similar to [`liquidity_penalty_multiplier_msat`], however, instead of using
+	 * only our latest estimate for the current liquidity available in the channel, it estimates
+	 * success probability based on the estimated liquidity available in the channel through
+	 * history. Specifically, every time we update our liquidity bounds on a given channel, we
+	 * track which of several buckets those bounds fall into, exponentially decaying the
+	 * probability of each bucket as new samples are added.
+	 * 
+	 * Default value: 10,000 msat
+	 * 
+	 * [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
+	 */
+	public get_historical_liquidity_penalty_multiplier_msat(): bigint {
+		const ret: bigint = bindings.ProbabilisticScoringParameters_get_historical_liquidity_penalty_multiplier_msat(this.ptr);
+		return ret;
+	}
+
+	/**
+	 * A multiplier used in conjunction with the negative `log10` of the channel's success
+	 * probability for the payment, as determined based on the history of our estimates of the
+	 * channel's available liquidity, to determine a penalty.
+	 * 
+	 * This penalty is similar to [`liquidity_penalty_multiplier_msat`], however, instead of using
+	 * only our latest estimate for the current liquidity available in the channel, it estimates
+	 * success probability based on the estimated liquidity available in the channel through
+	 * history. Specifically, every time we update our liquidity bounds on a given channel, we
+	 * track which of several buckets those bounds fall into, exponentially decaying the
+	 * probability of each bucket as new samples are added.
+	 * 
+	 * Default value: 10,000 msat
+	 * 
+	 * [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
+	 */
+	public set_historical_liquidity_penalty_multiplier_msat(val: bigint): void {
+		bindings.ProbabilisticScoringParameters_set_historical_liquidity_penalty_multiplier_msat(this.ptr, val);
+	}
+
+	/**
+	 * A multiplier used in conjunction with the payment amount and the negative `log10` of the
+	 * channel's success probability for the payment, as determined based on the history of our
+	 * estimates of the channel's available liquidity, to determine a penalty.
+	 * 
+	 * The purpose of the amount penalty is to avoid having fees dominate the channel cost for
+	 * large payments. The penalty is computed as the product of this multiplier and the `2^20`ths
+	 * of the payment amount, weighted by the negative `log10` of the success probability.
+	 * 
+	 * This penalty is similar to [`liquidity_penalty_amount_multiplier_msat`], however, instead
+	 * of using only our latest estimate for the current liquidity available in the channel, it
+	 * estimates success probability based on the estimated liquidity available in the channel
+	 * through history. Specifically, every time we update our liquidity bounds on a given
+	 * channel, we track which of several buckets those bounds fall into, exponentially decaying
+	 * the probability of each bucket as new samples are added.
+	 * 
+	 * Default value: 64 msat
+	 * 
+	 * [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
+	 */
+	public get_historical_liquidity_penalty_amount_multiplier_msat(): bigint {
+		const ret: bigint = bindings.ProbabilisticScoringParameters_get_historical_liquidity_penalty_amount_multiplier_msat(this.ptr);
+		return ret;
+	}
+
+	/**
+	 * A multiplier used in conjunction with the payment amount and the negative `log10` of the
+	 * channel's success probability for the payment, as determined based on the history of our
+	 * estimates of the channel's available liquidity, to determine a penalty.
+	 * 
+	 * The purpose of the amount penalty is to avoid having fees dominate the channel cost for
+	 * large payments. The penalty is computed as the product of this multiplier and the `2^20`ths
+	 * of the payment amount, weighted by the negative `log10` of the success probability.
+	 * 
+	 * This penalty is similar to [`liquidity_penalty_amount_multiplier_msat`], however, instead
+	 * of using only our latest estimate for the current liquidity available in the channel, it
+	 * estimates success probability based on the estimated liquidity available in the channel
+	 * through history. Specifically, every time we update our liquidity bounds on a given
+	 * channel, we track which of several buckets those bounds fall into, exponentially decaying
+	 * the probability of each bucket as new samples are added.
+	 * 
+	 * Default value: 64 msat
+	 * 
+	 * [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
+	 */
+	public set_historical_liquidity_penalty_amount_multiplier_msat(val: bigint): void {
+		bindings.ProbabilisticScoringParameters_set_historical_liquidity_penalty_amount_multiplier_msat(this.ptr, val);
+	}
+
+	/**
+	 * If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+	 * tracking can simply live on with increasingly stale data. Instead, when a channel has not
+	 * seen a liquidity estimate update for this amount of time, the historical datapoints are
+	 * decayed by half.
+	 * 
+	 * Note that after 16 or more half lives all historical data will be completely gone.
+	 * 
+	 * Default value: 14 days
+	 */
+	public get_historical_no_updates_half_life(): bigint {
+		const ret: bigint = bindings.ProbabilisticScoringParameters_get_historical_no_updates_half_life(this.ptr);
+		return ret;
+	}
+
+	/**
+	 * If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+	 * tracking can simply live on with increasingly stale data. Instead, when a channel has not
+	 * seen a liquidity estimate update for this amount of time, the historical datapoints are
+	 * decayed by half.
+	 * 
+	 * Note that after 16 or more half lives all historical data will be completely gone.
+	 * 
+	 * Default value: 14 days
+	 */
+	public set_historical_no_updates_half_life(val: bigint): void {
+		bindings.ProbabilisticScoringParameters_set_historical_no_updates_half_life(this.ptr, val);
+	}
+
 	/**
 	 * This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
 	 * channel's capacity, which makes us prefer nodes with a smaller `htlc_maximum_msat`. We