Fix incorrect docs around disconnect in peer_handler + rename fns
[rust-lightning] / lightning-net-tokio / src / lib.rs
1 extern crate bytes;
2 extern crate tokio;
3 extern crate tokio_codec;
4 extern crate futures;
5 extern crate lightning;
6 extern crate secp256k1;
7
8 use bytes::BufMut;
9
10 use futures::future;
11 use futures::future::Future;
12 use futures::{AsyncSink, Stream, Sink};
13 use futures::sync::mpsc;
14
15 use secp256k1::key::PublicKey;
16
17 use tokio::timer::Delay;
18 use tokio::net::TcpStream;
19
20 use lightning::ln::peer_handler;
21 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
22 use lightning::ln::msgs::ChannelMessageHandler;
23
24 use std::mem;
25 use std::net::SocketAddr;
26 use std::sync::{Arc, Mutex};
27 use std::sync::atomic::{AtomicU64, Ordering};
28 use std::time::{Duration, Instant};
29 use std::vec::Vec;
30 use std::hash::Hash;
31
/// Monotonically-increasing counter used to hand each Connection a unique id
/// (SocketDescriptor's Eq/Hash are based entirely on this id).
static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
33
/// A connection to a remote peer. Can be constructed either as a remote connection using
/// Connection::setup_outbound or by accepting an incoming connection via
/// Connection::setup_inbound.
pub struct Connection {
	// Sender half feeding the spawned writer task; taken (set to None) while a flush of a
	// previously-full sink is in flight, restored when the flush completes.
	writer: Option<mpsc::Sender<bytes::Bytes>>,
	// Poked (best-effort; full-channel errors are ignored) whenever the user should poll for
	// pending events on their ChannelManager/ChannelMonitor.
	event_notify: mpsc::Sender<()>,
	// Bytes that arrived while reading was paused; fed to read_event once reading resumes.
	pending_read: Vec<u8>,
	// Completing this oneshot wakes the reader future that parked itself while read_paused.
	read_blocker: Option<futures::sync::oneshot::Sender<Result<(), ()>>>,
	read_paused: bool,
	// Whether we still owe the PeerManager a socket_disconnected() call when the read stream
	// ends (cleared when read_event errors, since the PeerManager already knows then).
	need_disconnect: bool,
	// Unique per-connection id, drawn from ID_COUNTER.
	id: u64,
}
45 impl Connection {
	/// Spawns the read-side future for this connection: forwards each received buffer to
	/// PeerManager::read_event, parks itself on a oneshot while reading is paused, and, once
	/// the stream ends, calls socket_disconnected iff we still owe the PeerManager that
	/// notification (per need_disconnect).
	fn schedule_read<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, us: Arc<Mutex<Self>>, reader: futures::stream::SplitStream<tokio_codec::Framed<TcpStream, tokio_codec::BytesCodec>>) {
		let us_ref = us.clone();
		let us_close_ref = us.clone();
		let peer_manager_ref = peer_manager.clone();
		tokio::spawn(reader.for_each(move |b| {
			let pending_read = b.to_vec();
			{
				let mut lock = us_ref.lock().unwrap();
				// Anything stashed in pending_read must have been drained (by the
				// schedule_read! macro in send_data) before we were woken again.
				assert!(lock.pending_read.is_empty());
				if lock.read_paused {
					// Reading is paused: stash the bytes and park this future on a
					// oneshot; send_data's resume path fires it to wake us.
					lock.pending_read = pending_read;
					let (sender, blocker) = futures::sync::oneshot::channel();
					lock.read_blocker = Some(sender);
					return future::Either::A(blocker.then(|_| { Ok(()) }));
				}
			}
			//TODO: There's a race where we don't meet the requirements of socket_disconnected if its
			//called right here, after we release the us_ref lock in the scope above, but before we
			//call read_event!
			match peer_manager.read_event(&mut SocketDescriptor::new(us_ref.clone(), peer_manager.clone()), pending_read) {
				Ok(pause_read) => {
					if pause_read {
						let mut lock = us_ref.lock().unwrap();
						lock.read_paused = true;
					}
				},
				Err(e) => {
					// read_event errored, meaning the PeerManager has already dropped
					// this peer internally — do NOT call socket_disconnected for it.
					// Returning Err tears down the for_each stream.
					us_ref.lock().unwrap().need_disconnect = false;
					return future::Either::B(future::result(Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e))));
				}
			}

			if let Err(e) = us_ref.lock().unwrap().event_notify.try_send(()) {
				// Ignore full errors as we just need them to poll after this point, so if the user
				// hasn't received the last send yet, it doesn't matter.
				assert!(e.is_full());
			}

			future::Either::B(future::result(Ok(())))
		}).then(move |_| {
			// The read stream ended (EOF, I/O error, or read_event error). Only inform the
			// PeerManager if it doesn't already know about the disconnection.
			if us_close_ref.lock().unwrap().need_disconnect {
				peer_manager_ref.socket_disconnected(&SocketDescriptor::new(us_close_ref, peer_manager_ref.clone()));
				println!("Peer disconnected!");
			} else {
				println!("We disconnected peer!");
			}
			Ok(())
		}));
	}
95
	/// Splits the TcpStream into a reader (returned to the caller for schedule_read) and a
	/// writer, spawning a task that pumps our outbound mpsc channel into the writer half.
	fn new(event_notify: mpsc::Sender<()>, stream: TcpStream) -> (futures::stream::SplitStream<tokio_codec::Framed<TcpStream, tokio_codec::BytesCodec>>, Arc<Mutex<Self>>) {
		let (writer, reader) = tokio_codec::Framed::new(stream, tokio_codec::BytesCodec::new()).split();
		// Small bound (3): backpressure is handled by send_data pausing reads when the
		// sink reports NotReady, not by deep buffering here.
		let (send_sink, send_stream) = mpsc::channel(3);
		tokio::spawn(writer.send_all(send_stream.map_err(|_| -> std::io::Error {
			// The mpsc Receiver's error type is (); it can never actually fail.
			unreachable!();
		})).then(|_| {
			// Write-side errors are surfaced via the read side; swallow them here.
			future::result(Ok(()))
		}));
		// need_disconnect starts true: until read_event errors, the PeerManager must be told
		// if this socket goes away.
		let us = Arc::new(Mutex::new(Self { writer: Some(send_sink), event_notify, pending_read: Vec::new(), read_blocker: None, read_paused: false, need_disconnect: true, id: ID_COUNTER.fetch_add(1, Ordering::AcqRel) }));

		(reader, us)
	}
108
109         /// Process incoming messages and feed outgoing messages on the provided socket generated by
110         /// accepting an incoming connection (by scheduling futures with tokio::spawn).
111         ///
112         /// You should poll the Receive end of event_notify and call get_and_clear_pending_events() on
113         /// ChannelManager and ChannelMonitor objects.
114         pub fn setup_inbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, event_notify: mpsc::Sender<()>, stream: TcpStream) {
115                 let (reader, us) = Self::new(event_notify, stream);
116
117                 if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone(), peer_manager.clone())) {
118                         Self::schedule_read(peer_manager, us, reader);
119                 }
120         }
121
122         /// Process incoming messages and feed outgoing messages on the provided socket generated by
123         /// making an outbound connection which is expected to be accepted by a peer with the given
124         /// public key (by scheduling futures with tokio::spawn).
125         ///
126         /// You should poll the Receive end of event_notify and call get_and_clear_pending_events() on
127         /// ChannelManager and ChannelMonitor objects.
128         pub fn setup_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, stream: TcpStream) {
129                 let (reader, us) = Self::new(event_notify, stream);
130
131                 if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone(), peer_manager.clone())) {
132                         if SocketDescriptor::new(us.clone(), peer_manager.clone()).send_data(&initial_send, true) == initial_send.len() {
133                                 Self::schedule_read(peer_manager, us, reader);
134                         } else {
135                                 println!("Failed to write first full message to socket!");
136                         }
137                 }
138         }
139
140         /// Process incoming messages and feed outgoing messages on a new connection made to the given
141         /// socket address which is expected to be accepted by a peer with the given public key (by
142         /// scheduling futures with tokio::spawn).
143         ///
144         /// You should poll the Receive end of event_notify and call get_and_clear_pending_events() on
145         /// ChannelManager and ChannelMonitor objects.
146         pub fn connect_outbound<CMH: ChannelMessageHandler + 'static>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>, event_notify: mpsc::Sender<()>, their_node_id: PublicKey, addr: SocketAddr) {
147                 let connect_timeout = Delay::new(Instant::now() + Duration::from_secs(10)).then(|_| {
148                         future::err(std::io::Error::new(std::io::ErrorKind::TimedOut, "timeout reached"))
149                 });
150                 tokio::spawn(TcpStream::connect(&addr).select(connect_timeout)
151                         .and_then(move |stream| {
152                                 Connection::setup_outbound(peer_manager, event_notify, their_node_id, stream.0);
153                                 future::ok(())
154                         }).or_else(|_| {
155                                 //TODO: return errors somehow
156                                 future::ok(())
157                         }));
158         }
159 }
160
/// Handle given to the PeerManager to identify and write to a Connection. Cheap to clone;
/// equality and hashing are based solely on the Connection's unique id.
pub struct SocketDescriptor<CMH: ChannelMessageHandler + 'static> {
	conn: Arc<Mutex<Connection>>,
	// Cached copy of the Connection's id so Eq/Hash never need to take the Mutex.
	id: u64,
	peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>,
}
166 impl<CMH: ChannelMessageHandler> SocketDescriptor<CMH> {
167         fn new(conn: Arc<Mutex<Connection>>, peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor<CMH>, Arc<CMH>>>) -> Self {
168                 let id = conn.lock().unwrap().id;
169                 Self { conn, id, peer_manager }
170         }
171 }
impl<CMH: ChannelMessageHandler> peer_handler::SocketDescriptor for SocketDescriptor<CMH> {
	/// Attempts to queue `data` on the outbound sink, returning the number of bytes accepted
	/// (all of data.len(), or 0 if the sink is full/gone). If resume_read is set, also spawns
	/// a task that drains any pending_read bytes and un-pauses the read side.
	fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
		// Spawned lazily so it runs after we drop the conn lock held below: feed any bytes
		// stashed while paused back into read_event, then wake the parked reader future.
		macro_rules! schedule_read {
			($us_ref: expr) => {
				tokio::spawn(future::lazy(move || -> Result<(), ()> {
					let mut read_data = Vec::new();
					{
						let mut us = $us_ref.conn.lock().unwrap();
						// Take the stash without cloning; leaves pending_read empty,
						// which the reader future asserts on wake.
						mem::swap(&mut read_data, &mut us.pending_read);
					}
					if !read_data.is_empty() {
						let mut us_clone = $us_ref.clone();
						match $us_ref.peer_manager.read_event(&mut us_clone, read_data) {
							Ok(pause_read) => {
								// Pausing again: leave read_paused set and the
								// reader parked.
								if pause_read { return Ok(()); }
							},
							Err(_) => {
								//TODO: Not actually sure how to do this
								return Ok(());
							}
						}
					}
					let mut us = $us_ref.conn.lock().unwrap();
					if let Some(sender) = us.read_blocker.take() {
						// Wake the reader future parked in schedule_read.
						sender.send(Ok(())).unwrap();
					}
					us.read_paused = false;
					if let Err(e) = us.event_notify.try_send(()) {
						// Ignore full errors as we just need them to poll after this point, so if the user
						// hasn't received the last send yet, it doesn't matter.
						assert!(e.is_full());
					}
					Ok(())
				}));
			}
		}

		let mut us = self.conn.lock().unwrap();
		if resume_read {
			let us_ref = self.clone();
			// future::lazy defers execution, so spawning while holding the lock is fine.
			schedule_read!(us_ref);
		}
		if data.is_empty() { return 0; }
		if us.writer.is_none() {
			// Writer is out on a flush (or gone): report nothing written so the
			// PeerManager will retry, and stop reading until the flush finishes.
			us.read_paused = true;
			return 0;
		}

		let mut bytes = bytes::BytesMut::with_capacity(data.len());
		bytes.put(data);
		let write_res = us.writer.as_mut().unwrap().start_send(bytes.freeze());
		match write_res {
			Ok(res) => {
				match res {
					AsyncSink::Ready => {
						// Sink accepted the whole buffer.
						data.len()
					},
					AsyncSink::NotReady(_) => {
						// Sink is full: pause reads and take the writer out for a
						// flush; it is restored (and reads resumed) when the flush
						// completes.
						us.read_paused = true;
						let us_ref = self.clone();
						tokio::spawn(us.writer.take().unwrap().flush().then(move |writer_res| -> Result<(), ()> {
							if let Ok(writer) = writer_res {
								{
									let mut us = us_ref.conn.lock().unwrap();
									us.writer = Some(writer);
								}
								schedule_read!(us_ref);
							} // we'll fire the disconnect event on the socket reader end
							Ok(())
						}));
						0
					}
				}
			},
			Err(_) => {
				// We'll fire the disconnected event on the socket reader end
				0
			},
		}
	}

	/// Called by the PeerManager when it wants this socket torn down.
	fn disconnect_socket(&mut self) {
		let mut us = self.conn.lock().unwrap();
		// NOTE(review): setting need_disconnect = true here means the reader's end-of-stream
		// handler will call socket_disconnected() back into the PeerManager even though the
		// PeerManager itself initiated this disconnect — confirm against PeerManager's
		// disconnect_socket contract; it may expect no such callback.
		us.need_disconnect = true;
		// Stops feeding further reads to read_event; note the TCP stream itself is not shut
		// down here, the reader future keeps the socket alive until it ends.
		us.read_paused = true;
	}
}
259 impl<CMH: ChannelMessageHandler> Clone for SocketDescriptor<CMH> {
260         fn clone(&self) -> Self {
261                 Self {
262                         conn: Arc::clone(&self.conn),
263                         id: self.id,
264                         peer_manager: Arc::clone(&self.peer_manager),
265                 }
266         }
267 }
// Full equivalence holds: eq compares u64 ids, which is reflexive, symmetric and transitive.
impl<CMH: ChannelMessageHandler> Eq for SocketDescriptor<CMH> {}
269 impl<CMH: ChannelMessageHandler> PartialEq for SocketDescriptor<CMH> {
270         fn eq(&self, o: &Self) -> bool {
271                 self.id == o.id
272         }
273 }
274 impl<CMH: ChannelMessageHandler> Hash for SocketDescriptor<CMH> {
275         fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
276                 self.id.hash(state);
277         }
278 }
279