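//! Glue between rust-lightning's PeerManager and tokio TcpStream sockets: bytes read from the
//! socket are fed into read_event(), bytes handed to send_data() are written back onto the
//! socket, and a hand-rolled Waker turns socket writability into write_event() calls.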
use secp256k1::key::PublicKey;

use tokio::net::TcpStream;
use tokio::{io, time};
use tokio::sync::{mpsc, oneshot};
use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};

use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::msgs::ChannelMessageHandler;

use std::task;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use std::hash::Hash;

static ID_COUNTER: AtomicU64 = AtomicU64::new(0);

/// A connection to a remote peer. Can be constructed either as an outbound connection using
/// Connection::setup_outbound or as an inbound connection using Connection::setup_inbound.
pub struct Connection {
	writer: Option<io::WriteHalf<TcpStream>>,
	event_notify: mpsc::Sender<()>,
	// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
	// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
	// between being woken up with write-ready and calling PeerManager::write_event. This provides
	// that indirection, with a Sender which gets handed to the PeerManager Arc on the
	// schedule_read stack.
	//
	// An alternative (likely more efficient) approach would involve creating a RawWakerVTable at
	// runtime with functions templated by the Arc<PeerManager> type, calling write_event directly
	// from tokio's write wake, however doing so would require more unsafe voodoo than I really feel
	// like writing.
	write_avail: mpsc::Sender<()>,
	// When we are told by rust-lightning to pause read (because we have writes backing up), we do
	// so by setting read_paused. If the read thread thereafter reads some data, it will place a
	// Sender here and then block on it.
	read_blocker: Option<oneshot::Sender<()>>,
	read_paused: bool,
	// If we get disconnected via SocketDescriptor::disconnect_socket(), we don't call
	// disconnect_event(), but if we get an Err return value out of PeerManager, in general, we do.
	// We track here whether we'll need to call disconnect_event() after the socket closes.
	need_disconnect_event: bool,
	disconnect: bool,
	id: u64,
}
impl Connection {
	async fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut write_event: mpsc::Receiver<()>) {
		let peer_manager_ref = peer_manager.clone();
		let mut buf = [0; 8192];
		loop {
			macro_rules! shutdown_socket {
				($err: expr) => { {
					println!("Disconnecting peer due to {}!", $err);
					break;
				} }
			}

			// Whenever we want to block, we have to at least select with the write_event Receiver,
			// which is used by the SocketDescriptor to wake us up if we need to shut down the
			// socket or if we need to generate a write_event.
			macro_rules! select_write_ev {
				($v: expr) => { {
					assert!($v.is_some()); // We can't have dropped the sending end, it's in the us Arc!
					if us.lock().unwrap().disconnect {
						shutdown_socket!("disconnect_socket() call from RL");
					}
					if let Err(e) = peer_manager.write_event(&mut SocketDescriptor::new(us.clone())) {
						shutdown_socket!(e);
					}
				} }
			}

			tokio::select! {
				v = write_event.recv() => select_write_ev!(v),
				read = reader.read(&mut buf) => match read {
					Ok(0) => {
						println!("Connection closed");
						break;
					},
					Ok(len) => {
						if let Some(blocker) = {
							let mut lock = us.lock().unwrap();
							if lock.disconnect {
								shutdown_socket!("disconnect_socket() call from RL");
							}
							if lock.read_paused {
								let (sender, blocker) = oneshot::channel();
								lock.read_blocker = Some(sender);
								Some(blocker)
							} else { None }
						} {
							tokio::select! {
								res = blocker => {
									res.unwrap(); // We should never drop the sender without sending () into it!
									if us.lock().unwrap().disconnect {
										shutdown_socket!("disconnect_socket() call from RL");
									}
								},
								v = write_event.recv() => select_write_ev!(v),
							}
						}
						match peer_manager.read_event(&mut SocketDescriptor::new(Arc::clone(&us)), &buf[0..len]) {
							Ok(pause_read) => {
								if pause_read {
									let mut lock = us.lock().unwrap();
									lock.read_paused = true;
								}

								match us.lock().unwrap().event_notify.try_send(()) {
									Ok(_) => {},
									Err(mpsc::error::TrySendError::Full(_)) => {
										// Ignore full errors as we just need them to poll after this point, so if the user
										// hasn't received the last send yet, it doesn't matter.
									},
									_ => panic!()
								}
							},
							Err(e) => shutdown_socket!(e),
						}
					},
					Err(e) => {
						println!("Connection closed: {}", e);
						break;
					},
				},
			}
		}
		let writer_option = us.lock().unwrap().writer.take();
		if let Some(mut writer) = writer_option {
			writer.shutdown().await.expect("We should be able to shutdown() a socket, even if it is already disconnected");
		}
		if us.lock().unwrap().need_disconnect_event {
			peer_manager_ref.disconnect_event(&SocketDescriptor::new(Arc::clone(&us)));
			match us.lock().unwrap().event_notify.try_send(()) {
				Ok(_) => {},
				Err(mpsc::error::TrySendError::Full(_)) => {
					// Ignore full errors as we just need them to poll after this point, so if the user
					// hasn't received the last send yet, it doesn't matter.
				},
				_ => panic!()
			}
		}
	}

	fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
		// We only ever need a channel of depth 1 here: if we returned a non-full write to the
		// PeerManager, we will eventually get notified that there is room in the socket to write
		// new bytes, which will generate an event. That event will be popped off the queue before
		// we call write_event, ensuring that we have room to push a new () if, during the
		// write_event() call, send_data() returns a non-full write.
		let (write_avail, receiver) = mpsc::channel(1);
		let (reader, writer) = io::split(stream);

		(reader, receiver,
		Arc::new(Mutex::new(Self {
			writer: Some(writer), event_notify, write_avail,
			read_blocker: None, read_paused: false, need_disconnect_event: true, disconnect: false,
			id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
		})))
	}

	/// Process incoming messages and feed outgoing messages on the provided socket generated by
	/// accepting an incoming connection (by scheduling futures with tokio::spawn).
	///
	/// You should poll the receiving end of event_notify and call get_and_clear_pending_events()
	/// on the ChannelManager and ChannelMonitor objects.
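	///
	/// A minimal usage sketch (not from the original documentation); it assumes a `peer_manager`
	/// Arc and an `event_notify` Sender have already been created by the caller:
	///
	/// ```ignore
	/// let mut listener = tokio::net::TcpListener::bind("0.0.0.0:9735").await?;
	/// loop {
	///     // Hand each accepted TCP connection off to the PeerManager.
	///     let (stream, _addr) = listener.accept().await?;
	///     tokio::spawn(Connection::setup_inbound(
	///         peer_manager.clone(), event_notify.clone(), stream));
	/// }
	/// ```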
	pub async fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) {
		let (reader, receiver, us) = Self::new(event_notify, stream);

		if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
			tokio::spawn(Self::schedule_read(peer_manager, us, reader, receiver)).await;
		}
	}

	/// Process incoming messages and feed outgoing messages on the provided socket generated by
	/// making an outbound connection which is expected to be accepted by a peer with the given
	/// public key (by scheduling futures with tokio::spawn).
	///
	/// You should poll the receiving end of event_notify and call get_and_clear_pending_events()
	/// on the ChannelManager and ChannelMonitor objects.
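	///
	/// A minimal usage sketch (not from the original documentation), assuming the caller has
	/// already connected the TcpStream itself and has `peer_manager`, `event_notify` and
	/// `their_node_id` in scope (see connect_outbound for the variant that dials the address):
	///
	/// ```ignore
	/// let stream = tokio::net::TcpStream::connect("127.0.0.1:9735").await?;
	/// tokio::spawn(Connection::setup_outbound(
	///     peer_manager.clone(), event_notify.clone(), their_node_id, stream));
	/// ```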
	pub async fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) {
		let (reader, receiver, us) = Self::new(event_notify, stream);

		if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
			if let Err(_) = us.lock().unwrap().writer.as_mut().unwrap().write_all(&initial_send).await {
				// Note that we will skip disconnect_event here, in accordance with the PeerManager
				// requirements, as disconnect_event is called by the schedule_read Future.
				println!("Failed to write first full message to socket!");
				return;
			}
			tokio::spawn(Self::schedule_read(peer_manager, us, reader, receiver)).await;
		}
	}

	/// Process incoming messages and feed outgoing messages on a new connection made to the given
	/// socket address which is expected to be accepted by a peer with the given public key (by
	/// scheduling futures with tokio::spawn).
	///
	/// You should poll the receiving end of event_notify and call get_and_clear_pending_events()
	/// on the ChannelManager and ChannelMonitor objects.
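	///
	/// A minimal usage sketch (not from the original documentation); `peer_manager`,
	/// `event_notify` and `their_node_id` are assumed to exist already:
	///
	/// ```ignore
	/// let addr: std::net::SocketAddr = "127.0.0.1:9735".parse().unwrap();
	/// // The spawned future resolves once the connection closes, or once the
	/// // 10-second connect timeout fires.
	/// tokio::spawn(Connection::connect_outbound(
	///     peer_manager.clone(), event_notify.clone(), their_node_id, addr));
	/// ```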
	pub async fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) {
		let connect_timeout = time::delay_for(Duration::from_secs(10));
		let connect_fut = TcpStream::connect(&addr);
		tokio::select! {
			_ = connect_timeout => { },
			res = connect_fut => {
				if let Ok(stream) = res {
					Connection::setup_outbound(peer_manager, event_notify, their_node_id, stream).await;
				}
			},
		};
	}
}

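// Manual Waker plumbing for send_data() below: task::RawWakerVTable::new() takes, in order, the
// clone, wake (consuming), wake_by_ref and drop functions. The data pointer behind the waker is a
// leaked Box<SocketDescriptor> created in descriptor_to_waker() and reclaimed in
// drop_socket_waker(); "waking" just pushes () into the connection's write_avail channel so that
// schedule_read() calls write_event() for us.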
const SOCK_WAKER_VTABLE: task::RawWakerVTable =
	task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker);

fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker {
	descriptor_to_waker(orig_ptr as *const SocketDescriptor)
}
fn wake_socket_waker(orig_ptr: *const ()) {
	wake_socket_waker_by_ref(orig_ptr);
	drop_socket_waker(orig_ptr);
}
fn wake_socket_waker_by_ref(orig_ptr: *const ()) {
	let descriptor = orig_ptr as *const SocketDescriptor;
	// An error should be fine. Most likely we got two send_datas in a row, both of which failed to
	// fully write, but we only need to provide a write_event() once. Otherwise, the sending thread
	// may have already gone away due to a socket close, in which case there's nothing to wake up
	// anyway.
	let _ = unsafe { (*descriptor).write_avail.clone() }.try_send(());
}
fn drop_socket_waker(orig_ptr: *const ()) {
	let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut SocketDescriptor) };
	// _orig_box is now dropped
}
fn descriptor_to_waker(descriptor: *const SocketDescriptor) -> task::RawWaker {
	let new_box = Box::leak(Box::new(unsafe { (*descriptor).clone() }));
	let new_ptr = new_box as *const SocketDescriptor;
	task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE)
}

pub struct SocketDescriptor {
	conn: Arc<Mutex<Connection>>,
	// Ideally we'd just lock conn and push to the write_avail there, but sadly tokio calls our
	// waker irrespective of available space.
	write_avail: mpsc::Sender<()>,
	id: u64,
}
impl SocketDescriptor {
	fn new(conn: Arc<Mutex<Connection>>) -> Self {
		let (id, write_avail) = {
			let us = conn.lock().unwrap();
			(us.id, us.write_avail.clone())
		};
		Self { conn, write_avail, id }
	}
}
impl peer_handler::SocketDescriptor for SocketDescriptor {
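	// send_data() is a synchronous trait method, so we can't .await the tokio writer here.
	// Instead we poll poll_write() directly with the hand-rolled Waker built by
	// descriptor_to_waker() above; if the write would block, that waker later fires write_avail
	// and schedule_read() turns it into a write_event() call, while we pause reads in the
	// meantime as the send_data() docs require.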
	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
		let mut us = self.conn.lock().unwrap();
		if us.writer.is_none() {
			// The writer gets take()n when it's time to shut down, so just fast-return 0 here.
			return 0;
		}

		if resume_read {
			if let Some(sender) = us.read_blocker.take() {
				sender.send(()).unwrap();
			}
			us.read_paused = false;
		}
		if data.is_empty() { return 0; }
		let waker = unsafe { task::Waker::from_raw(descriptor_to_waker(self)) };
		let mut ctx = task::Context::from_waker(&waker);
		let mut written_len = 0;
		loop {
			match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
				task::Poll::Ready(Ok(res)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (because it should be a Poll::Pending
					// instead):
					assert_ne!(res, 0);
					written_len += res;
					if written_len == data.len() { return written_len; }
				},
				task::Poll::Ready(Err(e)) => {
					// The tokio docs *seem* to indicate this can't happen, and I certainly don't
					// know how to handle it if it does (because it should be a Poll::Pending
					// instead):
					assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
					// Probably we've already been closed, just return what we have and let the
					// read thread handle closing logic.
					return written_len;
				},
				task::Poll::Pending => {
					// We're queued up for a write event now, but we need to make sure we also
					// pause read given we're now waiting on the remote end to ACK (and in
					// accordance with the send_data() docs).
					us.read_paused = true;
					return written_len;
				},
			}
		}
	}

	fn disconnect_socket(&mut self) {
		let mut us = self.conn.lock().unwrap();
		us.need_disconnect_event = false;
		us.disconnect = true;
		us.read_paused = true;
		// Wake up the sending thread, assuming it's still alive
		let _ = us.write_avail.try_send(());
		// TODO: There's a race where we don't meet the requirements of disconnect_socket if the
		// read task is about to call a PeerManager function (e.g. read_event or write_event).
		// Ideally we need to release the us lock and block until we have confirmation from the
		// read task that it has broken out of its main loop.
	}
}
impl Clone for SocketDescriptor {
	fn clone(&self) -> Self {
		Self {
			conn: Arc::clone(&self.conn),
			write_avail: self.write_avail.clone(),
			id: self.id,
		}
	}
}
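// PeerManager uses SocketDescriptors as map keys, so descriptor identity is the per-connection
// id: clones referring to the same underlying Connection compare equal and hash identically.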
impl Eq for SocketDescriptor {}
impl PartialEq for SocketDescriptor {
	fn eq(&self, o: &Self) -> bool {
		self.id == o.id
	}
}
impl Hash for SocketDescriptor {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		self.id.hash(state);
	}
}