1 package org.ldk.structs;
3 import org.ldk.impl.bindings;
4 import org.ldk.enums.*;
6 import java.util.Arrays;
7 import java.lang.ref.Reference;
8 import javax.annotation.Nullable;
12 * [`ScoreLookUp`] implementation using channel success probability distributions.
14 * Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
15 * we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
16 * When a payment is forwarded through a channel (but fails later in the route), we learn the
17 * lower-bound on the channel's available liquidity must be at least the value of the HTLC.
19 * These bounds are then used to determine a success probability using the formula from
20 * *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
21 * and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
23 * This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
24 * [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
25 * milli-satoshis. The penalties, when added across all hops, have the property of being linear in
26 * terms of the entire path's success probability. This allows the router to directly compare
27 * penalties for different paths. See the documentation of those parameters for the exact formulas.
29 * The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
31 * Further, we track the history of our upper and lower liquidity bounds for each channel,
32 * allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
33 * and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
34 * formula, but using the history of a channel rather than our latest estimates for the liquidity
37 * [1]: https://arxiv.org/abs/2107.05322
38 * [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
39 * [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
40 * [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
41 * [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
42 * [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
44 @SuppressWarnings("unchecked") // We correctly assign various generic arrays
45 public class ProbabilisticScorer extends CommonBase {
// Package-private wrapper constructor: takes ownership tracking of a native pointer.
// `_dummy` exists only to disambiguate this overload from any public (long) constructor.
46 ProbabilisticScorer(Object _dummy, long ptr) { super(ptr); }
// Releases the underlying native object when this wrapper is garbage-collected.
// finalize() is deprecated in modern Java, hence the suppression; the binding
// generator relies on it to avoid leaking native memory.
47 @Override @SuppressWarnings("deprecation")
48 protected void finalize() throws Throwable {
// ptr == 0 means the native object was already freed or is not owned by this wrapper.
50 if (ptr != 0) { bindings.ProbabilisticScorer_free(ptr); }
54 * Creates a new scorer using the given scoring parameters for sending payments from a node
55 * through a network graph.
// Static factory: constructs a new native ProbabilisticScorer from the given
// decay parameters, network graph, and logger, and wraps it in a Java object.
57 public static ProbabilisticScorer of(org.ldk.structs.ProbabilisticScoringDecayParameters decay_params, org.ldk.structs.NetworkGraph network_graph, org.ldk.structs.Logger logger) {
58 long ret = bindings.ProbabilisticScorer_new(decay_params.ptr, network_graph.ptr, logger.ptr);
// Fences keep the Java wrappers (and thus their native pointers) alive until
// the native call above has completed, preventing premature finalization.
59 Reference.reachabilityFence(decay_params);
60 Reference.reachabilityFence(network_graph);
61 Reference.reachabilityFence(logger);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
62 if (ret >= 0 && ret <= 4096) { return null; }
// After the sentinel check above, this condition is always true; both branches
// are emitted by the binding generator.
63 org.ldk.structs.ProbabilisticScorer ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new org.ldk.structs.ProbabilisticScorer(null, ret); }
// Self-reference plus references to the arguments keep everything the native
// object depends on reachable for as long as the returned wrapper is.
64 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(ret_hu_conv); };
65 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(decay_params); };
66 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(network_graph); };
67 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(logger); };
72 * Dump the contents of this scorer into the configured logger.
74 * Note that this writes roughly one line per channel for which we have a liquidity estimate,
75 * which may be a substantial amount of log output.
// Logs this scorer's per-channel liquidity estimates via the configured logger.
// Purely a side-effecting native call; no return value.
77 public void debug_log_liquidity_stats() {
78 bindings.ProbabilisticScorer_debug_log_liquidity_stats(this.ptr);
// Keep `this` (and its native pointer) alive until the native call completes.
79 Reference.reachabilityFence(this);
83 * Query the estimated minimum and maximum liquidity available for sending a payment over the
84 * channel with `scid` towards the given `target` node.
// Returns the (min, max) liquidity estimate for channel `scid` towards `target`,
// or null if the native call returned an error-sentinel value.
86 public Option_C2Tuple_u64u64ZZ estimated_channel_liquidity_range(long scid, org.ldk.structs.NodeId target) {
87 long ret = bindings.ProbabilisticScorer_estimated_channel_liquidity_range(this.ptr, scid, target.ptr);
// Fences keep wrappers alive across the native call; the fence on the
// primitive `scid` is a no-op emitted uniformly by the binding generator.
88 Reference.reachabilityFence(this);
89 Reference.reachabilityFence(scid);
90 Reference.reachabilityFence(target);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
91 if (ret >= 0 && ret <= 4096) { return null; }
92 org.ldk.structs.Option_C2Tuple_u64u64ZZ ret_hu_conv = org.ldk.structs.Option_C2Tuple_u64u64ZZ.constr_from_ptr(ret);
// Tie lifetimes: the result keeps this scorer alive, and this scorer keeps
// `target` alive, so neither native object is freed while still referenced.
93 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(this); };
94 if (this != null) { this.ptrs_to.add(target); };
99 * Query the historical estimated minimum and maximum liquidity available for sending a
100 * payment over the channel with `scid` towards the given `target` node.
102 * Returns two sets of 32 buckets. The first set describes the lower-bound liquidity history,
103 * the second set describes the upper-bound liquidity history. Each bucket describes the
104 * relative frequency at which we've seen a liquidity bound in the bucket's range relative to
105 * the channel's total capacity, on an arbitrary scale. Because the values are slowly decayed,
106 * more recent data points are weighted more heavily than older datapoints.
108 * Note that the range of each bucket varies by its location to provide more granular results
109 * at the edges of a channel's capacity, where it is more likely to sit.
111 * When scoring, the estimated probability that an upper-/lower-bound lies in a given bucket
112 * is calculated by dividing that bucket's value with the total value of all buckets.
114 * For example, using a lower bucket count for illustrative purposes, a value of
115 * `[0, 0, 0, ..., 0, 32]` indicates that we believe the probability of a bound being very
116 * close to the channel's capacity to be 100%, and have never (recently) seen it in any other
117 * bucket. A value of `[31, 0, 0, ..., 0, 0, 32]` indicates we've seen the bound being both
118 * in the top and bottom bucket, and roughly with similar (recent) frequency.
120 * Because the datapoints are decayed slowly over time, values will eventually return to
121 * `Some(([0; 32], [0; 32]))` or `None` if no data remains for a channel.
123 * In order to fetch a single success probability from the buckets provided here, as used in
124 * the scoring model, see [`Self::historical_estimated_payment_success_probability`].
// Returns the two 32-bucket historical liquidity-bound distributions for channel
// `scid` towards `target` (see the javadoc above), or null on an error sentinel.
126 public Option_C2Tuple_ThirtyTwoU16sThirtyTwoU16sZZ historical_estimated_channel_liquidity_probabilities(long scid, org.ldk.structs.NodeId target) {
127 long ret = bindings.ProbabilisticScorer_historical_estimated_channel_liquidity_probabilities(this.ptr, scid, target.ptr);
// Fences keep wrappers alive across the native call; the fence on the
// primitive `scid` is a no-op emitted uniformly by the binding generator.
128 Reference.reachabilityFence(this);
129 Reference.reachabilityFence(scid);
130 Reference.reachabilityFence(target);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
131 if (ret >= 0 && ret <= 4096) { return null; }
132 org.ldk.structs.Option_C2Tuple_ThirtyTwoU16sThirtyTwoU16sZZ ret_hu_conv = org.ldk.structs.Option_C2Tuple_ThirtyTwoU16sThirtyTwoU16sZZ.constr_from_ptr(ret);
// Tie lifetimes: result -> this scorer -> target, preventing premature frees.
133 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(this); };
134 if (this != null) { this.ptrs_to.add(target); };
139 * Query the probability of payment success sending the given `amount_msat` over the channel
140 * with `scid` towards the given `target` node, based on the historical estimated liquidity
143 * These are the same bounds as returned by
144 * [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
145 * [`Self::estimated_channel_liquidity_range`]).
// Returns the historical success probability of sending `amount_msat` over
// channel `scid` towards `target` under `params`, or null on an error sentinel.
147 public Option_f64Z historical_estimated_payment_success_probability(long scid, org.ldk.structs.NodeId target, long amount_msat, org.ldk.structs.ProbabilisticScoringFeeParameters params) {
148 long ret = bindings.ProbabilisticScorer_historical_estimated_payment_success_probability(this.ptr, scid, target.ptr, amount_msat, params.ptr);
// Fences keep wrappers alive across the native call; fences on the primitive
// longs are no-ops emitted uniformly by the binding generator.
149 Reference.reachabilityFence(this);
150 Reference.reachabilityFence(scid);
151 Reference.reachabilityFence(target);
152 Reference.reachabilityFence(amount_msat);
153 Reference.reachabilityFence(params);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
154 if (ret >= 0 && ret <= 4096) { return null; }
155 org.ldk.structs.Option_f64Z ret_hu_conv = org.ldk.structs.Option_f64Z.constr_from_ptr(ret);
// Tie lifetimes: result -> this scorer -> (target, params).
156 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(this); };
157 if (this != null) { this.ptrs_to.add(target); };
158 if (this != null) { this.ptrs_to.add(params); };
163 * Constructs a new ScoreLookUp which calls the relevant methods on this_arg.
164 * This copies the `inner` pointer in this_arg and thus the returned ScoreLookUp must be freed before this_arg is
// Wraps this scorer as a ScoreLookUp trait object; returns null on an error
// sentinel. The returned object shares this scorer's native pointer, so (per
// the javadoc above) it must be freed before this scorer is.
166 public ScoreLookUp as_ScoreLookUp() {
167 long ret = bindings.ProbabilisticScorer_as_ScoreLookUp(this.ptr);
// Keep `this` alive until the native call completes.
168 Reference.reachabilityFence(this);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
169 if (ret >= 0 && ret <= 4096) { return null; }
170 ScoreLookUp ret_hu_conv = new ScoreLookUp(null, ret);
// The trait object keeps this scorer reachable while it is in use.
171 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(this); };
176 * Constructs a new ScoreUpdate which calls the relevant methods on this_arg.
177 * This copies the `inner` pointer in this_arg and thus the returned ScoreUpdate must be freed before this_arg is
// Wraps this scorer as a ScoreUpdate trait object; returns null on an error
// sentinel. The returned object shares this scorer's native pointer, so (per
// the javadoc above) it must be freed before this scorer is.
179 public ScoreUpdate as_ScoreUpdate() {
180 long ret = bindings.ProbabilisticScorer_as_ScoreUpdate(this.ptr);
// Keep `this` alive until the native call completes.
181 Reference.reachabilityFence(this);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
182 if (ret >= 0 && ret <= 4096) { return null; }
183 ScoreUpdate ret_hu_conv = new ScoreUpdate(null, ret);
// The trait object keeps this scorer reachable while it is in use.
184 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(this); };
189 * Constructs a new Score which calls the relevant methods on this_arg.
190 * This copies the `inner` pointer in this_arg and thus the returned Score must be freed before this_arg is
// Wraps this scorer as a Score trait object; returns null on an error
// sentinel. The returned object shares this scorer's native pointer, so (per
// the javadoc above) it must be freed before this scorer is.
192 public Score as_Score() {
193 long ret = bindings.ProbabilisticScorer_as_Score(this.ptr);
// Keep `this` alive until the native call completes.
194 Reference.reachabilityFence(this);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
195 if (ret >= 0 && ret <= 4096) { return null; }
196 Score ret_hu_conv = new Score(null, ret);
// The trait object keeps this scorer reachable while it is in use.
197 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(this); };
202 * Serialize the ProbabilisticScorer object into a byte array which can be read by ProbabilisticScorer_read
// Serializes this scorer to bytes readable by ProbabilisticScorer_read / read().
204 public byte[] write() {
205 byte[] ret = bindings.ProbabilisticScorer_write(this.ptr);
// Keep `this` alive until the native call completes.
206 Reference.reachabilityFence(this);
211 * Read a ProbabilisticScorer from a byte array, created by ProbabilisticScorer_write
// Deserializes a ProbabilisticScorer from `ser` (produced by write()), using the
// given decay parameters (arg_a), network graph (arg_b), and logger (arg_c).
// Returns null only if the native call yields an error-sentinel pointer;
// decode failures are reported inside the returned Result.
213 public static Result_ProbabilisticScorerDecodeErrorZ read(byte[] ser, org.ldk.structs.ProbabilisticScoringDecayParameters arg_a, org.ldk.structs.NetworkGraph arg_b, org.ldk.structs.Logger arg_c) {
214 long ret = bindings.ProbabilisticScorer_read(ser, arg_a.ptr, arg_b.ptr, arg_c.ptr);
// Fences keep the arguments' wrappers alive until the native call completes.
215 Reference.reachabilityFence(ser);
216 Reference.reachabilityFence(arg_a);
217 Reference.reachabilityFence(arg_b);
218 Reference.reachabilityFence(arg_c);
// Values in (0, 4096] are reserved by the bindings as error/null sentinels.
219 if (ret >= 0 && ret <= 4096) { return null; }
220 Result_ProbabilisticScorerDecodeErrorZ ret_hu_conv = Result_ProbabilisticScorerDecodeErrorZ.constr_from_ptr(ret);
// The result keeps its dependencies reachable while the native object lives.
221 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(arg_a); };
222 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(arg_b); };
223 if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.add(arg_c); };