[C#] Update auto-generated files
[ldk-java] / c_sharp / src / org / ldk / structs / ProbabilisticScorer.cs
1 using org.ldk.impl;
2 using org.ldk.enums;
3 using org.ldk.util;
4 using System;
5
6 namespace org { namespace ldk { namespace structs {
7
8
9 /**
10  * [`Score`] implementation using channel success probability distributions.
11  * 
12  * Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
13  * we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
14  * When a payment is forwarded through a channel (but fails later in the route), we learn the
15  * lower-bound on the channel's available liquidity must be at least the value of the HTLC.
16  * 
17  * These bounds are then used to determine a success probability using the formula from
18  * *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
19  * and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
20  * 
21  * This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
22  * [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
23  * milli-satoshis. The penalties, when added across all hops, have the property of being linear in
24  * terms of the entire path's success probability. This allows the router to directly compare
25  * penalties for different paths. See the documentation of those parameters for the exact formulas.
26  * 
27  * The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
28  * 
29  * Further, we track the history of our upper and lower liquidity bounds for each channel,
30  * allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
31  * and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
32  * formula, but using the history of a channel rather than our latest estimates for the liquidity
33  * bounds.
34  * 
35  * # Note
36  * 
37  * Mixing the `no-std` feature between serialization and deserialization results in undefined
38  * behavior.
39  * 
40  * [1]: https://arxiv.org/abs/2107.05322
41  * [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
42  * [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
43  * [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
44  * [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
45  * [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
46  */
47 public class ProbabilisticScorer : CommonBase {
	// NOTE(review): auto-generated binding. `ptr` (from CommonBase) holds the raw native
	// pointer; the finalizer releases it via the native free function.
48 	internal ProbabilisticScorer(object _dummy, long ptr) : base(ptr) { }
49 	~ProbabilisticScorer() {
50 		if (ptr != 0) { bindings.ProbabilisticScorer_free(ptr); }
51 	}
52
53 	/**
54 	 * Creates a new scorer using the given scoring parameters for sending payments from a node
55 	 * through a network graph.
56 	 */
57 	public static ProbabilisticScorer of(org.ldk.structs.ProbabilisticScoringDecayParameters decay_params, org.ldk.structs.NetworkGraph network_graph, org.ldk.structs.Logger logger) {
58 		long ret = bindings.ProbabilisticScorer_new(decay_params == null ? 0 : decay_params.ptr, network_graph == null ? 0 : network_graph.ptr, logger.ptr);
		// GC.KeepAlive prevents the managed wrappers from being finalized (and their native
		// memory freed) before the native call above has completed.
59 		GC.KeepAlive(decay_params);
60 		GC.KeepAlive(network_graph);
61 		GC.KeepAlive(logger);
		// Values in 0..4096 appear to be the bindings' null/error sentinel range rather than
		// valid native pointers — TODO confirm against the bindings layer.
62 		if (ret >= 0 && ret <= 4096) { return null; }
63 		org.ldk.structs.ProbabilisticScorer ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new org.ldk.structs.ProbabilisticScorer(null, ret); }
		// ptrs_to holds managed references so that objects the native result points into
		// stay alive as long as the result does (generated liveness-tracking pattern).
64 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(ret_hu_conv); };
65 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(decay_params); };
66 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(network_graph); };
67 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(logger); };
68 		return ret_hu_conv;
69 	}
70
71 	/**
72 	 * Dump the contents of this scorer into the configured logger.
73 	 * 
74 	 * Note that this writes roughly one line per channel for which we have a liquidity estimate,
75 	 * which may be a substantial amount of log output.
76 	 */
77 	public void debug_log_liquidity_stats() {
78 		bindings.ProbabilisticScorer_debug_log_liquidity_stats(this.ptr);
		// Keep `this` alive until the native call returns so the finalizer cannot free ptr mid-call.
79 		GC.KeepAlive(this);
80 	}
81
82 	/**
83 	 * Query the estimated minimum and maximum liquidity available for sending a payment over the
84 	 * channel with `scid` towards the given `target` node.
	 *
	 * Returns null when the native call yields no result (sentinel range 0..4096).
85 	 */
86 	public Option_C2Tuple_u64u64ZZ estimated_channel_liquidity_range(long scid, org.ldk.structs.NodeId target) {
87 		long ret = bindings.ProbabilisticScorer_estimated_channel_liquidity_range(this.ptr, scid, target == null ? 0 : target.ptr);
88 		GC.KeepAlive(this);
89 		GC.KeepAlive(scid);
90 		GC.KeepAlive(target);
91 		if (ret >= 0 && ret <= 4096) { return null; }
92 		org.ldk.structs.Option_C2Tuple_u64u64ZZ ret_hu_conv = org.ldk.structs.Option_C2Tuple_u64u64ZZ.constr_from_ptr(ret);
93 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		// NOTE(review): `this != null` is always true in C#; generated-code artifact, kept as-is.
94 		if (this != null) { this.ptrs_to.AddLast(target); };
95 		return ret_hu_conv;
96 	}
97
98 	/**
99 	 * Query the historical estimated minimum and maximum liquidity available for sending a
100 	 * payment over the channel with `scid` towards the given `target` node.
101 	 * 
102 	 * Returns two sets of 8 buckets. The first set describes the octiles for lower-bound
103 	 * liquidity estimates, the second set describes the octiles for upper-bound liquidity
104 	 * estimates. Each bucket describes the relative frequency at which we've seen a liquidity
105 	 * bound in the octile relative to the channel's total capacity, on an arbitrary scale.
106 	 * Because the values are slowly decayed, more recent data points are weighted more heavily
107 	 * than older datapoints.
108 	 * 
109 	 * When scoring, the estimated probability that an upper-/lower-bound lies in a given octile
110 	 * relative to the channel's total capacity is calculated by dividing that bucket's value with
111 	 * the total of all buckets for the given bound.
112 	 * 
113 	 * For example, a value of `[0, 0, 0, 0, 0, 0, 0, 32]` indicates that we believe the probability
114 	 * of a bound being in the top octile to be 100%, and have never (recently) seen it in any
115 	 * other octiles. A value of `[31, 0, 0, 0, 0, 0, 0, 32]` indicates we've seen the bound being
116 	 * both in the top and bottom octile, and roughly with similar (recent) frequency.
117 	 * 
118 	 * Because the datapoints are decayed slowly over time, values will eventually return to
119 	 * `Some(([0; 8], [0; 8]))`.
120 	 */
121 	public Option_C2Tuple_EightU16sEightU16sZZ historical_estimated_channel_liquidity_probabilities(long scid, org.ldk.structs.NodeId target) {
122 		long ret = bindings.ProbabilisticScorer_historical_estimated_channel_liquidity_probabilities(this.ptr, scid, target == null ? 0 : target.ptr);
123 		GC.KeepAlive(this);
124 		GC.KeepAlive(scid);
125 		GC.KeepAlive(target);
126 		if (ret >= 0 && ret <= 4096) { return null; }
127 		org.ldk.structs.Option_C2Tuple_EightU16sEightU16sZZ ret_hu_conv = org.ldk.structs.Option_C2Tuple_EightU16sEightU16sZZ.constr_from_ptr(ret);
128 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
129 		if (this != null) { this.ptrs_to.AddLast(target); };
130 		return ret_hu_conv;
131 	}
132
133 	/**
134 	 * Constructs a new Score which calls the relevant methods on this_arg.
135 	 * This copies the `inner` pointer in this_arg and thus the returned Score must be freed before this_arg is
136 	 */
137 	public Score as_Score() {
138 		long ret = bindings.ProbabilisticScorer_as_Score(this.ptr);
139 		GC.KeepAlive(this);
140 		if (ret >= 0 && ret <= 4096) { return null; }
141 		Score ret_hu_conv = new Score(null, ret);
		// The Score view borrows this scorer's native pointer, so it must keep `this` alive.
142 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
143 		return ret_hu_conv;
144 	}
145
146 	/**
147 	 * Serialize the ProbabilisticScorer object into a byte array which can be read by ProbabilisticScorer_read
148 	 */
149 	public byte[] write() {
150 		byte[] ret = bindings.ProbabilisticScorer_write(this.ptr);
151 		GC.KeepAlive(this);
152 		return ret;
153 	}
154
155 	/**
156 	 * Read a ProbabilisticScorer from a byte array, created by ProbabilisticScorer_write
	 *
	 * `arg_a`/`arg_b`/`arg_c` mirror the constructor arguments of [`of`]: decay parameters,
	 * network graph, and logger — presumably they must match those used when writing; verify
	 * against the upstream LDK docs.
157 	 */
158 	public static Result_ProbabilisticScorerDecodeErrorZ read(byte[] ser, org.ldk.structs.ProbabilisticScoringDecayParameters arg_a, org.ldk.structs.NetworkGraph arg_b, org.ldk.structs.Logger arg_c) {
159 		long ret = bindings.ProbabilisticScorer_read(ser, arg_a == null ? 0 : arg_a.ptr, arg_b == null ? 0 : arg_b.ptr, arg_c.ptr);
160 		GC.KeepAlive(ser);
161 		GC.KeepAlive(arg_a);
162 		GC.KeepAlive(arg_b);
163 		GC.KeepAlive(arg_c);
164 		if (ret >= 0 && ret <= 4096) { return null; }
165 		Result_ProbabilisticScorerDecodeErrorZ ret_hu_conv = Result_ProbabilisticScorerDecodeErrorZ.constr_from_ptr(ret);
166 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(arg_a); };
167 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(arg_b); };
168 		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(arg_c); };
169 		return ret_hu_conv;
170 	}
171
172 }
173 } } }