[C#] Update auto-generated C# bindings
[ldk-java] / c_sharp / src / org / ldk / structs / ProbabilisticScorer.cs
1 using org.ldk.impl;
2 using org.ldk.enums;
3 using org.ldk.util;
4 using System;
5
6 namespace org { namespace ldk { namespace structs {
7
8
9 /**
10  * [`ScoreLookUp`] implementation using channel success probability distributions.
11  * 
12  * Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
13  * we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
14  * When a payment is forwarded through a channel (but fails later in the route), we learn the
15  * lower-bound on the channel's available liquidity must be at least the value of the HTLC.
16  * 
17  * These bounds are then used to determine a success probability using the formula from
18  * Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
19  * and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
20  * 
21  * This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
22  * [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
23  * milli-satoshis. The penalties, when added across all hops, have the property of being linear in
24  * terms of the entire path's success probability. This allows the router to directly compare
25  * penalties for different paths. See the documentation of those parameters for the exact formulas.
26  * 
27  * The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
28  * 
29  * Further, we track the history of our upper and lower liquidity bounds for each channel,
30  * allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
31  * and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
32  * formula, but using the history of a channel rather than our latest estimates for the liquidity
33  * bounds.
34  * 
35  * [1]: https://arxiv.org/abs/2107.05322
36  * [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
37  * [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
38  * [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
39  * [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
40  * [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
41  */
public class ProbabilisticScorer : CommonBase {
	// Thin wrapper over a native LDK ProbabilisticScorer; the inherited `ptr` holds the
	// native object's address and is owned by this instance (freed in the finalizer).
	internal ProbabilisticScorer(object _dummy, long ptr) : base(ptr) { }
	~ProbabilisticScorer() {
		// ptr == 0 means we never took ownership of a native object; otherwise free it
		// exactly once when the GC collects this wrapper.
		if (ptr != 0) { bindings.ProbabilisticScorer_free(ptr); }
	}

	/**
	 * Creates a new scorer using the given scoring parameters for sending payments from a node
	 * through a network graph.
	 */
	public static ProbabilisticScorer of(org.ldk.structs.ProbabilisticScoringDecayParameters decay_params, org.ldk.structs.NetworkGraph network_graph, org.ldk.structs.Logger logger) {
		long ret = bindings.ProbabilisticScorer_new(decay_params == null ? 0 : decay_params.ptr, network_graph == null ? 0 : network_graph.ptr, logger.ptr);
		// Keep the argument wrappers reachable until after the native call returns, so their
		// finalizers cannot free native objects whose raw pointers were just passed over FFI.
		GC.KeepAlive(decay_params);
		GC.KeepAlive(network_graph);
		GC.KeepAlive(logger);
		// Return values in [0, 4096] are null/error sentinels from the native layer, never
		// valid heap pointers; this convention recurs in every method below.
		if (ret >= 0 && ret <= 4096) { return null; }
		org.ldk.structs.ProbabilisticScorer ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new org.ldk.structs.ProbabilisticScorer(null, ret); }
		// NOTE(review): the self-reference below follows the generated-bindings pattern;
		// presumably intentional pinning — confirm against the bindings generator before changing.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(ret_hu_conv); };
		// Tie the argument wrappers' lifetimes to the returned scorer: the native scorer
		// references their native objects, so they must not be freed before it is.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(decay_params); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(network_graph); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(logger); };
		return ret_hu_conv;
	}

	/**
	 * Dump the contents of this scorer into the configured logger.
	 * 
	 * Note that this writes roughly one line per channel for which we have a liquidity estimate,
	 * which may be a substantial amount of log output.
	 */
	public void debug_log_liquidity_stats() {
		bindings.ProbabilisticScorer_debug_log_liquidity_stats(this.ptr);
		// Prevent this wrapper from being finalized (and freeing the native scorer) while the
		// native call above may still be using this.ptr.
		GC.KeepAlive(this);
	}

	/**
	 * Query the estimated minimum and maximum liquidity available for sending a payment over the
	 * channel with `scid` towards the given `target` node.
	 */
	public Option_C2Tuple_u64u64ZZ estimated_channel_liquidity_range(long scid, org.ldk.structs.NodeId target) {
		long ret = bindings.ProbabilisticScorer_estimated_channel_liquidity_range(this.ptr, scid, target == null ? 0 : target.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(scid);
		GC.KeepAlive(target);
		// Sentinel range: null/error from the native layer (see `of` above).
		if (ret >= 0 && ret <= 4096) { return null; }
		org.ldk.structs.Option_C2Tuple_u64u64ZZ ret_hu_conv = org.ldk.structs.Option_C2Tuple_u64u64ZZ.constr_from_ptr(ret);
		// The returned option holds a reference into this scorer's state, so keep the scorer
		// alive as long as the result is; likewise keep `target` alive via this scorer.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		// `this != null` is always true in C#; artifact of the bindings generator.
		if (this != null) { this.ptrs_to.AddLast(target); };
		return ret_hu_conv;
	}

	/**
	 * Query the historical estimated minimum and maximum liquidity available for sending a
	 * payment over the channel with `scid` towards the given `target` node.
	 * 
	 * Returns two sets of 32 buckets. The first set describes the lower-bound liquidity history,
	 * the second set describes the upper-bound liquidity history. Each bucket describes the
	 * relative frequency at which we've seen a liquidity bound in the bucket's range relative to
	 * the channel's total capacity, on an arbitrary scale. Because the values are slowly decayed,
	 * more recent data points are weighted more heavily than older datapoints.
	 * 
	 * Note that the range of each bucket varies by its location to provide more granular results
	 * at the edges of a channel's capacity, where it is more likely to sit.
	 * 
	 * When scoring, the estimated probability that an upper-/lower-bound lies in a given bucket
	 * is calculated by dividing that bucket's value with the total value of all buckets.
	 * 
	 * For example, using a lower bucket count for illustrative purposes, a value of
	 * `[0, 0, 0, ..., 0, 32]` indicates that we believe the probability of a bound being very
	 * close to the channel's capacity to be 100%, and have never (recently) seen it in any other
	 * bucket. A value of `[31, 0, 0, ..., 0, 0, 32]` indicates we've seen the bound being both
	 * in the top and bottom bucket, and roughly with similar (recent) frequency.
	 * 
	 * Because the datapoints are decayed slowly over time, values will eventually return to
	 * `Some(([0; 32], [0; 32]))` or `None` if no data remains for a channel.
	 * 
	 * In order to fetch a single success probability from the buckets provided here, as used in
	 * the scoring model, see [`Self::historical_estimated_payment_success_probability`].
	 */
	public Option_C2Tuple_ThirtyTwoU16sThirtyTwoU16sZZ historical_estimated_channel_liquidity_probabilities(long scid, org.ldk.structs.NodeId target) {
		long ret = bindings.ProbabilisticScorer_historical_estimated_channel_liquidity_probabilities(this.ptr, scid, target == null ? 0 : target.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(scid);
		GC.KeepAlive(target);
		// Sentinel range: null/error from the native layer (see `of` above).
		if (ret >= 0 && ret <= 4096) { return null; }
		org.ldk.structs.Option_C2Tuple_ThirtyTwoU16sThirtyTwoU16sZZ ret_hu_conv = org.ldk.structs.Option_C2Tuple_ThirtyTwoU16sThirtyTwoU16sZZ.constr_from_ptr(ret);
		// Result references scorer state: keep this scorer alive while the result lives,
		// and keep `target` alive via this scorer (`this != null` is a generator artifact).
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		if (this != null) { this.ptrs_to.AddLast(target); };
		return ret_hu_conv;
	}

	/**
	 * Query the probability of payment success sending the given `amount_msat` over the channel
	 * with `scid` towards the given `target` node, based on the historical estimated liquidity
	 * bounds.
	 * 
	 * These are the same bounds as returned by
	 * [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
	 * [`Self::estimated_channel_liquidity_range`]).
	 */
	public Option_f64Z historical_estimated_payment_success_probability(long scid, org.ldk.structs.NodeId target, long amount_msat, org.ldk.structs.ProbabilisticScoringFeeParameters _params) {
		long ret = bindings.ProbabilisticScorer_historical_estimated_payment_success_probability(this.ptr, scid, target == null ? 0 : target.ptr, amount_msat, _params == null ? 0 : _params.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(scid);
		GC.KeepAlive(target);
		GC.KeepAlive(amount_msat);
		GC.KeepAlive(_params);
		// Sentinel range: null/error from the native layer (see `of` above).
		if (ret >= 0 && ret <= 4096) { return null; }
		org.ldk.structs.Option_f64Z ret_hu_conv = org.ldk.structs.Option_f64Z.constr_from_ptr(ret);
		// Keep the scorer alive while the result lives; keep the argument wrappers whose
		// native objects may be referenced alive via this scorer.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		if (this != null) { this.ptrs_to.AddLast(target); };
		if (this != null) { this.ptrs_to.AddLast(_params); };
		return ret_hu_conv;
	}

	/**
	 * Constructs a new ScoreLookUp which calls the relevant methods on this_arg.
	 * This copies the `inner` pointer in this_arg and thus the returned ScoreLookUp must be freed before this_arg is
	 */
	public ScoreLookUp as_ScoreLookUp() {
		long ret = bindings.ProbabilisticScorer_as_ScoreLookUp(this.ptr);
		GC.KeepAlive(this);
		if (ret >= 0 && ret <= 4096) { return null; }
		ScoreLookUp ret_hu_conv = new ScoreLookUp(null, ret);
		// The trait object borrows this scorer's inner pointer (see doc comment above), so
		// the scorer must outlive it.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		return ret_hu_conv;
	}

	/**
	 * Constructs a new ScoreUpdate which calls the relevant methods on this_arg.
	 * This copies the `inner` pointer in this_arg and thus the returned ScoreUpdate must be freed before this_arg is
	 */
	public ScoreUpdate as_ScoreUpdate() {
		long ret = bindings.ProbabilisticScorer_as_ScoreUpdate(this.ptr);
		GC.KeepAlive(this);
		if (ret >= 0 && ret <= 4096) { return null; }
		ScoreUpdate ret_hu_conv = new ScoreUpdate(null, ret);
		// Borrows this scorer's inner pointer; keep the scorer alive while the trait object lives.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		return ret_hu_conv;
	}

	/**
	 * Constructs a new Score which calls the relevant methods on this_arg.
	 * This copies the `inner` pointer in this_arg and thus the returned Score must be freed before this_arg is
	 */
	public Score as_Score() {
		long ret = bindings.ProbabilisticScorer_as_Score(this.ptr);
		GC.KeepAlive(this);
		if (ret >= 0 && ret <= 4096) { return null; }
		Score ret_hu_conv = new Score(null, ret);
		// Borrows this scorer's inner pointer; keep the scorer alive while the trait object lives.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		return ret_hu_conv;
	}

	/**
	 * Serialize the ProbabilisticScorer object into a byte array which can be read by ProbabilisticScorer_read
	 */
	public byte[] write() {
		long ret = bindings.ProbabilisticScorer_write(this.ptr);
		GC.KeepAlive(this);
		if (ret >= 0 && ret <= 4096) { return null; }
		// `ret` is a native handle to the serialized buffer; decode it into a managed byte[].
		byte[] ret_conv = InternalUtils.decodeUint8Array(ret);
		return ret_conv;
	}

	/**
	 * Read a ProbabilisticScorer from a byte array, created by ProbabilisticScorer_write
	 */
	public static Result_ProbabilisticScorerDecodeErrorZ read(byte[] ser, org.ldk.structs.ProbabilisticScoringDecayParameters arg_a, org.ldk.structs.NetworkGraph arg_b, org.ldk.structs.Logger arg_c) {
		long ret = bindings.ProbabilisticScorer_read(InternalUtils.encodeUint8Array(ser), arg_a == null ? 0 : arg_a.ptr, arg_b == null ? 0 : arg_b.ptr, arg_c.ptr);
		// Keep argument wrappers alive across the FFI call (see `of` above for rationale).
		GC.KeepAlive(ser);
		GC.KeepAlive(arg_a);
		GC.KeepAlive(arg_b);
		GC.KeepAlive(arg_c);
		if (ret >= 0 && ret <= 4096) { return null; }
		Result_ProbabilisticScorerDecodeErrorZ ret_hu_conv = Result_ProbabilisticScorerDecodeErrorZ.constr_from_ptr(ret);
		// The decoded scorer (inside the result) references these objects natively, so tie
		// their lifetimes to the result wrapper.
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(arg_a); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(arg_b); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(arg_c); };
		return ret_hu_conv;
	}

}
225 } } }