ldk-java: c_sharp/src/org/ldk/structs/PeerManager.cs
using org.ldk.impl;
using org.ldk.enums;
using org.ldk.util;
using System;

namespace org { namespace ldk { namespace structs {


/**
 * A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
 * socket events into messages which it passes on to its [`MessageHandler`].
 * 
 * Locks are taken internally, so you must never assume that reentrancy from a
 * [`SocketDescriptor`] call back into [`PeerManager`] methods will not deadlock.
 * 
 * Calls to [`read_event`] will decode relevant messages and pass them to the
 * [`ChannelMessageHandler`], likely doing message processing in-line. Thus, the primary form of
 * parallelism in Rust-Lightning is in calls to [`read_event`]. Note, however, that calls to any
 * [`PeerManager`] functions related to the same connection must occur only in serial, making new
 * calls only after previous ones have returned.
 * 
 * Rather than using a plain [`PeerManager`], it is preferable to use either a [`SimpleArcPeerManager`]
 * or a [`SimpleRefPeerManager`], for conciseness. See their documentation for more details, but
 * essentially you should default to using a [`SimpleRefPeerManager`], and use a
 * [`SimpleArcPeerManager`] when you require a `PeerManager` with a static lifetime, such as when
 * you're using lightning-net-tokio.
 * 
 * [`read_event`]: PeerManager::read_event
 */
public class PeerManager : CommonBase {
	internal PeerManager(object _dummy, long ptr) : base(ptr) { }
	~PeerManager() {
		if (ptr != 0) { bindings.PeerManager_free(ptr); }
	}

	/**
	 * Constructs a new `PeerManager` with the given message handlers.
	 * 
	 * `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
	 * cryptographically secure random bytes.
	 * 
	 * `current_time` is used as an always-increasing counter that survives across restarts and is
	 * incremented irregularly internally. In general it is best to simply use the current UNIX
	 * timestamp, however if it is not available a persistent counter that increases once per
	 * minute should suffice.
	 */
	public static PeerManager of(ChannelMessageHandler message_handler_chan_handler_arg, RoutingMessageHandler message_handler_route_handler_arg, OnionMessageHandler message_handler_onion_message_handler_arg, CustomMessageHandler message_handler_custom_message_handler_arg, int current_time, byte[] ephemeral_random_data, org.ldk.structs.Logger logger, org.ldk.structs.NodeSigner node_signer) {
		long ret = bindings.PeerManager_new(bindings.MessageHandler_new(message_handler_chan_handler_arg.ptr, message_handler_route_handler_arg.ptr, message_handler_onion_message_handler_arg.ptr, message_handler_custom_message_handler_arg.ptr), current_time, InternalUtils.encodeUint8Array(InternalUtils.check_arr_len(ephemeral_random_data, 32)), logger.ptr, node_signer.ptr);
		GC.KeepAlive(message_handler_chan_handler_arg);
		GC.KeepAlive(message_handler_route_handler_arg);
		GC.KeepAlive(message_handler_onion_message_handler_arg);
		GC.KeepAlive(message_handler_custom_message_handler_arg);
		GC.KeepAlive(current_time);
		GC.KeepAlive(ephemeral_random_data);
		GC.KeepAlive(logger);
		GC.KeepAlive(node_signer);
		if (ret >= 0 && ret <= 4096) { return null; }
		org.ldk.structs.PeerManager ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new org.ldk.structs.PeerManager(null, ret); }
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(ret_hu_conv); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(message_handler_chan_handler_arg); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(message_handler_route_handler_arg); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(message_handler_onion_message_handler_arg); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(message_handler_custom_message_handler_arg); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(logger); };
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(node_signer); };
		return ret_hu_conv;
	}
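
	// Illustrative usage sketch (not part of the generated bindings): constructing a PeerManager.
	// It assumes you already hold handler implementations `chan_handler`, `route_handler`,
	// `onion_handler` and `custom_handler`, plus `logger` and `node_signer` instances obtained
	// from the rest of your LDK setup.
	//
	//   // 32 bytes of cryptographically secure randomness for per-connection ephemeral keys.
	//   byte[] ephemeral = new byte[32];
	//   System.Security.Cryptography.RandomNumberGenerator.Create().GetBytes(ephemeral);
	//   // The current UNIX timestamp satisfies the "always-increasing counter" requirement.
	//   int now = (int)DateTimeOffset.UtcNow.ToUnixTimeSeconds();
	//   PeerManager peer_manager = PeerManager.of(chan_handler, route_handler, onion_handler,
	//       custom_handler, now, ephemeral, logger, node_signer);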

	/**
	 * Returns a list of [`PeerDetails`] for connected peers that have completed the initial
	 * handshake.
	 */
	public PeerDetails[] list_peers() {
		long ret = bindings.PeerManager_list_peers(this.ptr);
		GC.KeepAlive(this);
		if (ret >= 0 && ret <= 4096) { return null; }
		int ret_conv_13_len = InternalUtils.getArrayLength(ret);
		PeerDetails[] ret_conv_13_arr = new PeerDetails[ret_conv_13_len];
		for (int n = 0; n < ret_conv_13_len; n++) {
			long ret_conv_13 = InternalUtils.getU64ArrayElem(ret, n);
			org.ldk.structs.PeerDetails ret_conv_13_hu_conv = null; if (ret_conv_13 < 0 || ret_conv_13 > 4096) { ret_conv_13_hu_conv = new org.ldk.structs.PeerDetails(null, ret_conv_13); }
			if (ret_conv_13_hu_conv != null) { ret_conv_13_hu_conv.ptrs_to.AddLast(this); };
			ret_conv_13_arr[n] = ret_conv_13_hu_conv;
		}
		bindings.free_buffer(ret);
		return ret_conv_13_arr;
	}

	/**
	 * Returns the [`PeerDetails`] of a connected peer that has completed the initial handshake.
	 * 
	 * Will return `None` if the peer is unknown or it hasn't completed the initial handshake.
	 * 
	 * Note that the return value (or a relevant inner pointer) may be NULL or all-0s to represent None
	 */
	public PeerDetails peer_by_node_id(byte[] their_node_id) {
		long ret = bindings.PeerManager_peer_by_node_id(this.ptr, InternalUtils.encodeUint8Array(InternalUtils.check_arr_len(their_node_id, 33)));
		GC.KeepAlive(this);
		GC.KeepAlive(their_node_id);
		if (ret >= 0 && ret <= 4096) { return null; }
		org.ldk.structs.PeerDetails ret_hu_conv = null; if (ret < 0 || ret > 4096) { ret_hu_conv = new org.ldk.structs.PeerDetails(null, ret); }
		if (ret_hu_conv != null) { ret_hu_conv.ptrs_to.AddLast(this); };
		return ret_hu_conv;
	}
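
	// Illustrative usage sketch: since `None` surfaces as a null return in these bindings, callers
	// should null-check the result. `their_node_id` is assumed to be a 33-byte compressed public
	// key obtained elsewhere.
	//
	//   PeerDetails details = peer_manager.peer_by_node_id(their_node_id);
	//   if (details == null) {
	//       // Peer is unknown or has not completed the initial handshake yet.
	//   }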

	/**
	 * Indicates a new outbound connection has been established to a node with the given `node_id`
	 * and an optional remote network address.
	 * 
	 * The remote network address adds the option to report a remote IP address back to a connecting
	 * peer using the init message.
	 * The user should pass the remote network address of the host they are connected to.
	 * 
	 * If an `Err` is returned here you must disconnect the connection immediately.
	 * 
	 * Returns a small number of bytes to send to the remote node (currently always 50).
	 * 
	 * Panics if descriptor is duplicative with some other descriptor which has not yet been
	 * [`socket_disconnected`].
	 * 
	 * [`socket_disconnected`]: PeerManager::socket_disconnected
	 */
	public Result_CVec_u8ZPeerHandleErrorZ new_outbound_connection(byte[] their_node_id, org.ldk.structs.SocketDescriptor descriptor, org.ldk.structs.Option_SocketAddressZ remote_network_address) {
		long ret = bindings.PeerManager_new_outbound_connection(this.ptr, InternalUtils.encodeUint8Array(InternalUtils.check_arr_len(their_node_id, 33)), descriptor.ptr, remote_network_address.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(their_node_id);
		GC.KeepAlive(descriptor);
		GC.KeepAlive(remote_network_address);
		if (ret >= 0 && ret <= 4096) { return null; }
		Result_CVec_u8ZPeerHandleErrorZ ret_hu_conv = Result_CVec_u8ZPeerHandleErrorZ.constr_from_ptr(ret);
		if (this != null) { this.ptrs_to.AddLast(descriptor); };
		if (this != null) { this.ptrs_to.AddLast(remote_network_address); };
		return ret_hu_conv;
	}
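
	// Illustrative usage sketch: registering a freshly connected outbound socket. It assumes
	// `descriptor` is your SocketDescriptor implementation wrapping that socket, `send_over_socket`
	// and `close_socket` are hypothetical helpers in your own socket driver, and the generated
	// Result_CVec_u8ZPeerHandleErrorZ_OK subclass exposes the initial bytes as `res`.
	//
	//   Result_CVec_u8ZPeerHandleErrorZ res = peer_manager.new_outbound_connection(
	//       their_node_id, descriptor, Option_SocketAddressZ.none());
	//   if (res is Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK ok) {
	//       send_over_socket(ok.res); // currently always 50 bytes, per the note above
	//   } else {
	//       close_socket(); // on Err the connection must be torn down immediately
	//   }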

	/**
	 * Indicates a new inbound connection has been established to a node with an optional remote
	 * network address.
	 * 
	 * The remote network address adds the option to report a remote IP address back to a connecting
	 * peer using the init message.
	 * The user should pass the remote network address of the host they are connected to.
	 * 
	 * May refuse the connection by returning an Err, but will never write bytes to the remote end
	 * (outbound connector always speaks first). If an `Err` is returned here you must disconnect
	 * the connection immediately.
	 * 
	 * Panics if descriptor is duplicative with some other descriptor which has not yet been
	 * [`socket_disconnected`].
	 * 
	 * [`socket_disconnected`]: PeerManager::socket_disconnected
	 */
	public Result_NonePeerHandleErrorZ new_inbound_connection(org.ldk.structs.SocketDescriptor descriptor, org.ldk.structs.Option_SocketAddressZ remote_network_address) {
		long ret = bindings.PeerManager_new_inbound_connection(this.ptr, descriptor.ptr, remote_network_address.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(descriptor);
		GC.KeepAlive(remote_network_address);
		if (ret >= 0 && ret <= 4096) { return null; }
		Result_NonePeerHandleErrorZ ret_hu_conv = Result_NonePeerHandleErrorZ.constr_from_ptr(ret);
		if (this != null) { this.ptrs_to.AddLast(descriptor); };
		if (this != null) { this.ptrs_to.AddLast(remote_network_address); };
		return ret_hu_conv;
	}
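
	// Illustrative usage sketch: registering a newly accepted inbound socket. Nothing is written
	// to the peer at this point (the outbound side speaks first); on Err the socket should simply
	// be closed. `descriptor` and `close_socket` are assumptions as in the outbound example above.
	//
	//   Result_NonePeerHandleErrorZ res = peer_manager.new_inbound_connection(
	//       descriptor, Option_SocketAddressZ.none());
	//   if (!(res is Result_NonePeerHandleErrorZ.Result_NonePeerHandleErrorZ_OK)) {
	//       close_socket(); // the connection was refused
	//   }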

	/**
	 * Indicates that there is room to write data to the given socket descriptor.
	 * 
	 * May return an Err to indicate that the connection should be closed.
	 * 
	 * May call [`send_data`] on the descriptor passed in (or an equal descriptor) before
	 * returning. Thus, be very careful with reentrancy issues! The invariants around calling
	 * [`write_buffer_space_avail`] in case a write did not fully complete must still hold - be
	 * ready to call [`write_buffer_space_avail`] again if a write call generated here isn't
	 * sufficient!
	 * 
	 * [`send_data`]: SocketDescriptor::send_data
	 * [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
	 */
	public Result_NonePeerHandleErrorZ write_buffer_space_avail(org.ldk.structs.SocketDescriptor descriptor) {
		long ret = bindings.PeerManager_write_buffer_space_avail(this.ptr, descriptor.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(descriptor);
		if (ret >= 0 && ret <= 4096) { return null; }
		Result_NonePeerHandleErrorZ ret_hu_conv = Result_NonePeerHandleErrorZ.constr_from_ptr(ret);
		return ret_hu_conv;
	}

	/**
	 * Indicates that data was read from the given socket descriptor.
	 * 
	 * May return an Err to indicate that the connection should be closed.
	 * 
	 * Will *not* call back into [`send_data`] on any descriptors to avoid reentrancy complexity.
	 * Because of this, however, you should call [`process_events`] after any `read_event` to
	 * generate [`send_data`] calls to handle responses.
	 * 
	 * If `Ok(true)` is returned, further read_events should not be triggered until a
	 * [`send_data`] call on this descriptor has `resume_read` set (preventing DoS issues in the
	 * send buffer).
	 * 
	 * In order to avoid processing too many messages at once per peer, `data` should be on the
	 * order of 4KiB.
	 * 
	 * [`send_data`]: SocketDescriptor::send_data
	 * [`process_events`]: PeerManager::process_events
	 */
	public Result_boolPeerHandleErrorZ read_event(org.ldk.structs.SocketDescriptor peer_descriptor, byte[] data) {
		long ret = bindings.PeerManager_read_event(this.ptr, peer_descriptor.ptr, InternalUtils.encodeUint8Array(data));
		GC.KeepAlive(this);
		GC.KeepAlive(peer_descriptor);
		GC.KeepAlive(data);
		if (ret >= 0 && ret <= 4096) { return null; }
		Result_boolPeerHandleErrorZ ret_hu_conv = Result_boolPeerHandleErrorZ.constr_from_ptr(ret);
		return ret_hu_conv;
	}
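
	// Illustrative usage sketch of the read path described above: feed socket reads in roughly
	// 4KiB chunks, pause reading when `Ok(true)` comes back, and always follow up with
	// process_events() so responses get queued via send_data. `chunk`, `pause_reading` and
	// `close_socket` are hypothetical pieces of your own socket driver.
	//
	//   Result_boolPeerHandleErrorZ res = peer_manager.read_event(descriptor, chunk);
	//   if (res is Result_boolPeerHandleErrorZ.Result_boolPeerHandleErrorZ_OK ok) {
	//       if (ok.res) { pause_reading(); } // resume once send_data has `resume_read` set
	//   } else {
	//       close_socket(); // an Err means the connection should be closed
	//   }
	//   peer_manager.process_events();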

	/**
	 * Checks for any events generated by our handlers and processes them. Includes sending most
	 * response messages as well as messages generated by calls to handler functions directly (eg
	 * functions like [`ChannelManager::process_pending_htlc_forwards`] or [`send_payment`]).
	 * 
	 * May call [`send_data`] on [`SocketDescriptor`]s. Thus, be very careful with reentrancy
	 * issues!
	 * 
	 * You don't have to call this function explicitly if you are using [`lightning-net-tokio`]
	 * or one of the other clients provided in our language bindings.
	 * 
	 * Note that if there are any other calls to this function waiting on lock(s) this may return
	 * without doing any work. All available events that need handling will be handled before the
	 * other calls return.
	 * 
	 * [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
	 * [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
	 * [`send_data`]: SocketDescriptor::send_data
	 */
	public void process_events() {
		bindings.PeerManager_process_events(this.ptr);
		GC.KeepAlive(this);
	}

	/**
	 * Indicates that the given socket descriptor's connection is now closed.
	 */
	public void socket_disconnected(org.ldk.structs.SocketDescriptor descriptor) {
		bindings.PeerManager_socket_disconnected(this.ptr, descriptor.ptr);
		GC.KeepAlive(this);
		GC.KeepAlive(descriptor);
	}

	/**
	 * Disconnect a peer given its node id.
	 * 
	 * If a peer is connected, this will call [`disconnect_socket`] on the descriptor for the
	 * peer. Thus, be very careful about reentrancy issues.
	 * 
	 * [`disconnect_socket`]: SocketDescriptor::disconnect_socket
	 */
	public void disconnect_by_node_id(byte[] node_id) {
		bindings.PeerManager_disconnect_by_node_id(this.ptr, InternalUtils.encodeUint8Array(InternalUtils.check_arr_len(node_id, 33)));
		GC.KeepAlive(this);
		GC.KeepAlive(node_id);
	}

	/**
	 * Disconnects all currently-connected peers. This is useful on platforms where there may be
	 * an indication that TCP sockets have stalled even if we weren't around to time them out
	 * using regular ping/pongs.
	 */
	public void disconnect_all_peers() {
		bindings.PeerManager_disconnect_all_peers(this.ptr);
		GC.KeepAlive(this);
	}

	/**
	 * Send pings to each peer and disconnect those which did not respond to the last round of
	 * pings.
	 * 
	 * This may be called on any timescale you want, however, roughly once every ten seconds is
	 * preferred. The call rate determines both how often we send a ping to our peers and how much
	 * time they have to respond before we disconnect them.
	 * 
	 * May call [`send_data`] on all [`SocketDescriptor`]s. Thus, be very careful with reentrancy
	 * issues!
	 * 
	 * [`send_data`]: SocketDescriptor::send_data
	 */
	public void timer_tick_occurred() {
		bindings.PeerManager_timer_tick_occurred(this.ptr);
		GC.KeepAlive(this);
	}
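
	// Illustrative usage sketch: driving the ping/pong logic from a System.Threading.Timer at the
	// roughly ten-second cadence suggested above, assuming a `peer_manager` built as in the
	// constructor sketch earlier in this file.
	//
	//   var ping_timer = new System.Threading.Timer(
	//       _ => peer_manager.timer_tick_occurred(), null,
	//       TimeSpan.FromSeconds(10), TimeSpan.FromSeconds(10));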

	/**
	 * Generates a signed node_announcement from the given arguments, sending it to all connected
	 * peers. Note that peers will likely ignore this message unless we have at least one public
	 * channel which has at least six confirmations on-chain.
	 * 
	 * `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
	 * node to humans. They carry no in-protocol meaning.
	 * 
	 * `addresses` represent the set (possibly empty) of socket addresses on which this node
	 * accepts incoming connections. These will be included in the node_announcement, publicly
	 * tying these addresses together and to this node. If you wish to preserve user privacy,
	 * addresses should likely contain only Tor Onion addresses.
	 * 
	 * Panics if `addresses` is absurdly large (more than 100).
	 * 
	 * [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
	 */
	public void broadcast_node_announcement(byte[] rgb, byte[] alias, SocketAddress[] addresses) {
		bindings.PeerManager_broadcast_node_announcement(this.ptr, InternalUtils.encodeUint8Array(InternalUtils.check_arr_len(rgb, 3)), InternalUtils.encodeUint8Array(InternalUtils.check_arr_len(alias, 32)), InternalUtils.encodeUint64Array(InternalUtils.mapArray(addresses, addresses_conv_15 => addresses_conv_15.ptr)));
		GC.KeepAlive(this);
		GC.KeepAlive(rgb);
		GC.KeepAlive(alias);
		GC.KeepAlive(addresses);
		foreach (SocketAddress addresses_conv_15 in addresses) { if (this != null) { this.ptrs_to.AddLast(addresses_conv_15); }; };
	}
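
	// Illustrative usage sketch: announcing this node with a 3-byte color, a zero-padded 32-byte
	// alias and no publicly reachable addresses (an empty array keeps the announcement
	// address-free). The color and alias values are arbitrary examples.
	//
	//   byte[] rgb = new byte[] { 0x00, 0x2b, 0x6b };
	//   byte[] alias = new byte[32];
	//   System.Text.Encoding.UTF8.GetBytes("my-ldk-node").CopyTo(alias, 0);
	//   peer_manager.broadcast_node_announcement(rgb, alias, new SocketAddress[0]);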

}
} } }