Add test for onion failure processing
[rust-lightning] src/ln/channelmanager.rs
1 //! The top-level channel management and payment tracking stuff lives here.
2 //!
3 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
4 //! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
5 //! upon reconnect to the relevant peer(s).
6 //!
7 //! It does not manage routing logic (see ln::router for that) nor does it manage constructing
8 //! on-chain transactions (it only monitors the chain to watch for any force-closes that might
9 //! imply it needs to fail HTLCs/payments/channels it manages).
10
11 use bitcoin::blockdata::block::BlockHeader;
12 use bitcoin::blockdata::transaction::Transaction;
13 use bitcoin::blockdata::constants::genesis_block;
14 use bitcoin::network::constants::Network;
15 use bitcoin::network::serialize::BitcoinHash;
16 use bitcoin::util::hash::Sha256dHash;
17
18 use secp256k1::key::{SecretKey,PublicKey};
19 use secp256k1::{Secp256k1,Message};
20 use secp256k1::ecdh::SharedSecret;
21 use secp256k1;
22
23 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
24 use chain::transaction::OutPoint;
25 use ln::channel::{Channel, ChannelError, ChannelKeys};
26 use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
27 use ln::router::{Route,RouteHop};
28 use ln::msgs;
29 use ln::msgs::{HandleError,ChannelMessageHandler};
30 use util::{byte_utils, events, internal_traits, rng};
31 use util::sha2::Sha256;
32 use util::ser::{Readable, Writeable};
33 use util::chacha20poly1305rfc::ChaCha20;
34 use util::logger::Logger;
35 use util::errors::APIError;
36
37 use crypto;
38 use crypto::mac::{Mac,MacResult};
39 use crypto::hmac::Hmac;
40 use crypto::digest::Digest;
41 use crypto::symmetriccipher::SynchronousStreamCipher;
42
43 use std::{ptr, mem};
44 use std::collections::HashMap;
45 use std::collections::hash_map;
46 use std::io::Cursor;
47 use std::sync::{Mutex,MutexGuard,Arc};
48 use std::sync::atomic::{AtomicUsize, Ordering};
49 use std::time::{Instant,Duration};
50
51 /// We hold various information about HTLC relay in the HTLC objects in Channel itself:
52 ///
53 /// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
54 /// forward the HTLC with information it will give back to us when it does so, or if it should Fail
55 /// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
56 ///
57 /// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
58 /// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
59 /// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
60 /// the HTLC backwards along the relevant path).
61 /// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
62 /// our payment, which we can use to decode errors or inform the user that the payment was sent.
63 mod channel_held_info {
64         use ln::msgs;
65         use ln::router::Route;
66         use secp256k1::key::SecretKey;
67         use secp256k1::ecdh::SharedSecret;
68
69         /// Stores the info we will need to send when we want to forward an HTLC onwards
70         #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
71         pub struct PendingForwardHTLCInfo {
72                 pub(super) onion_packet: Option<msgs::OnionPacket>,
73                 pub(super) incoming_shared_secret: SharedSecret,
74                 pub(super) payment_hash: [u8; 32],
75                 pub(super) short_channel_id: u64,
76                 pub(super) amt_to_forward: u64,
77                 pub(super) outgoing_cltv_value: u32,
78         }
79
80         #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
81         pub enum HTLCFailureMsg {
82                 Relay(msgs::UpdateFailHTLC),
83                 Malformed(msgs::UpdateFailMalformedHTLC),
84         }
85
86         /// Stores either the failure to send back for an HTLC we can't forward or the info needed to forward it onwards
87         #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
88         pub enum PendingHTLCStatus {
89                 Forward(PendingForwardHTLCInfo),
90                 Fail(HTLCFailureMsg),
91         }
92
93         /// Tracks the inbound corresponding to an outbound HTLC
94         #[derive(Clone)]
95         pub struct HTLCPreviousHopData {
96                 pub(super) short_channel_id: u64,
97                 pub(super) htlc_id: u64,
98                 pub(super) incoming_packet_shared_secret: SharedSecret,
99         }
100
101         /// Tracks the source of an outbound HTLC - either a previous hop's inbound HTLC we are forwarding, or a payment we originated ourselves
102         #[derive(Clone)]
103         pub enum HTLCSource {
104                 PreviousHopData(HTLCPreviousHopData),
105                 OutboundRoute {
106                         route: Route,
107                         session_priv: SecretKey,
108                         /// Technically we can recalculate this from the route, but we cache it here to avoid
109                         /// doing a double-pass on route when we get a failure back
110                         first_hop_htlc_msat: u64,
111                 },
112         }
113         #[cfg(test)]
114         impl HTLCSource {
115                 pub fn dummy() -> Self {
116                         HTLCSource::OutboundRoute {
117                                 route: Route { hops: Vec::new() },
118                                 session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
119                                 first_hop_htlc_msat: 0,
120                         }
121                 }
122         }
123
124         #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
125         pub(crate) enum HTLCFailReason {
126                 ErrorPacket {
127                         err: msgs::OnionErrorPacket,
128                 },
129                 Reason {
130                         failure_code: u16,
131                         data: Vec<u8>,
132                 }
133         }
134 }
135 pub(super) use self::channel_held_info::*;
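// Illustrative sketch (not part of the original file) of how the relay bookkeeping types above
// fit together: an HTLC we relay is tracked backwards via HTLCSource::PreviousHopData, a payment
// we originate via HTLCSource::OutboundRoute (see HTLCSource::dummy() above), and a local failure
// is expressed as an HTLCFailReason. All type and field names below come from the module above.
#[cfg(test)]
fn channel_held_info_usage_sketch() {
        let secp_ctx = Secp256k1::new();
        let our_key = SecretKey::from_slice(&secp_ctx, &[42; 32]).unwrap();
        let their_pubkey = PublicKey::from_secret_key(&secp_ctx, &our_key);
        // The inbound hop a forwarded HTLC came from, kept so we can fail or fulfill backwards:
        let _prev_hop = HTLCPreviousHopData {
                short_channel_id: 42,
                htlc_id: 0,
                incoming_packet_shared_secret: SharedSecret::new(&secp_ctx, &their_pubkey, &our_key),
        };
        // A local failure we could pass back to that hop (0x4000 | 10 is unknown_next_peer):
        let _reason = HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() };
}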
136
137 struct MsgHandleErrInternal {
138         err: msgs::HandleError,
139         needs_channel_force_close: bool,
140 }
141 impl MsgHandleErrInternal {
142         #[inline]
143         fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
144                 Self {
145                         err: HandleError {
146                                 err,
147                                 action: Some(msgs::ErrorAction::SendErrorMessage {
148                                         msg: msgs::ErrorMessage {
149                                                 channel_id,
150                                                 data: err.to_string()
151                                         },
152                                 }),
153                         },
154                         needs_channel_force_close: false,
155                 }
156         }
157         #[inline]
158         fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
159                 Self {
160                         err: HandleError {
161                                 err,
162                                 action: Some(msgs::ErrorAction::SendErrorMessage {
163                                         msg: msgs::ErrorMessage {
164                                                 channel_id,
165                                                 data: err.to_string()
166                                         },
167                                 }),
168                         },
169                         needs_channel_force_close: true,
170                 }
171         }
172         #[inline]
173         fn from_maybe_close(err: msgs::HandleError) -> Self {
174                 Self { err, needs_channel_force_close: true }
175         }
176         #[inline]
177         fn from_no_close(err: msgs::HandleError) -> Self {
178                 Self { err, needs_channel_force_close: false }
179         }
180         #[inline]
181         fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
182                 Self {
183                         err: match err {
184                                 ChannelError::Ignore(msg) => HandleError {
185                                         err: msg,
186                                         action: Some(msgs::ErrorAction::IgnoreError),
187                                 },
188                                 ChannelError::Close(msg) => HandleError {
189                                         err: msg,
190                                         action: Some(msgs::ErrorAction::SendErrorMessage {
191                                                 msg: msgs::ErrorMessage {
192                                                         channel_id,
193                                                         data: msg.to_string()
194                                                 },
195                                         }),
196                                 },
197                         },
198                         needs_channel_force_close: false,
199                 }
200         }
201         #[inline]
202         fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
203                 Self {
204                         err: match err {
205                                 ChannelError::Ignore(msg) => HandleError {
206                                         err: msg,
207                                         action: Some(msgs::ErrorAction::IgnoreError),
208                                 },
209                                 ChannelError::Close(msg) => HandleError {
210                                         err: msg,
211                                         action: Some(msgs::ErrorAction::SendErrorMessage {
212                                                 msg: msgs::ErrorMessage {
213                                                         channel_id,
214                                                         data: msg.to_string()
215                                                 },
216                                         }),
217                                 },
218                         },
219                         needs_channel_force_close: true,
220                 }
221         }
222 }
223
224 /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
225 /// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
226 /// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
227 /// probably increase this significantly.
228 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
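// Illustrative sketch (not part of the original file), assuming `rand` is a uniformly random u32
// drawn via util::rng: pick a holding delay in roughly the (1x, 5x) range described above and
// push back the next forwarding time accordingly.
// let hold_ms = MIN_HTLC_RELAY_HOLDING_CELL_MILLIS + (rand % (4 * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
// channel_state.next_forward = Instant::now() + Duration::from_millis(hold_ms as u64);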
229
230 struct HTLCForwardInfo {
231         prev_short_channel_id: u64,
232         prev_htlc_id: u64,
233         forward_info: PendingForwardHTLCInfo,
234 }
235
236 struct ChannelHolder {
237         by_id: HashMap<[u8; 32], Channel>,
238         short_to_id: HashMap<u64, [u8; 32]>,
239         next_forward: Instant,
240         /// short channel id -> forward infos. Key of 0 means payments received
241         /// Note that while this is held in the same mutex as the channels themselves, no consistency
242         /// guarantees are made about there existing a channel with the short id here, nor the short
243         /// ids in the PendingForwardHTLCInfo!
244         forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
245         /// Note that while this is held in the same mutex as the channels themselves, no consistency
246         /// guarantees are made about the channels given here actually existing anymore by the time you
247         /// go to read them!
248         claimable_htlcs: HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
249 }
250 struct MutChannelHolder<'a> {
251         by_id: &'a mut HashMap<[u8; 32], Channel>,
252         short_to_id: &'a mut HashMap<u64, [u8; 32]>,
253         next_forward: &'a mut Instant,
254         forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
255         claimable_htlcs: &'a mut HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
256 }
257 impl ChannelHolder {
258         fn borrow_parts(&mut self) -> MutChannelHolder {
259                 MutChannelHolder {
260                         by_id: &mut self.by_id,
261                         short_to_id: &mut self.short_to_id,
262                         next_forward: &mut self.next_forward,
263                         forward_htlcs: &mut self.forward_htlcs,
264                         claimable_htlcs: &mut self.claimable_htlcs,
265                 }
266         }
267 }
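// Illustrative sketch (not part of the original file): queueing an inbound HTLC for forwarding.
// The forward_htlcs key is the *outgoing* short_channel_id (0 when we are the final recipient),
// while prev_short_channel_id/prev_htlc_id record where to fail or fulfill backwards later.
// channel_state.forward_htlcs.entry(forward_info.short_channel_id).or_insert(Vec::new())
//         .push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });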
268
269 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
270 const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
271
272 /// Manager which keeps track of a number of channels and sends messages to the appropriate
273 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
274 ///
275 /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
276 /// to individual Channels.
277 pub struct ChannelManager {
278         genesis_hash: Sha256dHash,
279         fee_estimator: Arc<FeeEstimator>,
280         monitor: Arc<ManyChannelMonitor>,
281         chain_monitor: Arc<ChainWatchInterface>,
282         tx_broadcaster: Arc<BroadcasterInterface>,
283
284         announce_channels_publicly: bool,
285         fee_proportional_millionths: u32,
286         latest_block_height: AtomicUsize,
287         secp_ctx: Secp256k1<secp256k1::All>,
288
289         channel_state: Mutex<ChannelHolder>,
290         our_network_key: SecretKey,
291
292         pending_events: Mutex<Vec<events::Event>>,
293
294         logger: Arc<Logger>,
295 }
296
297 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
298 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
299 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
300 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
301 /// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
302 const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
303 const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
304
305 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
306 // if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
307 // HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
308 // CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
309 #[deny(const_err)]
310 #[allow(dead_code)]
311 const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;
312
313 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
314 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
315 #[deny(const_err)]
316 #[allow(dead_code)]
317 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
318
319 macro_rules! secp_call {
320         ( $res: expr, $err: expr ) => {
321                 match $res {
322                         Ok(key) => key,
323                         Err(_) => return Err($err),
324                 }
325         };
326 }
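// Illustrative usage sketch (not part of the original file): secp_call! unwraps a secp256k1
// Result, early-returning the supplied error from the enclosing function on failure. The error
// value below is a hypothetical example:
// let blinding = secp_call!(SecretKey::from_slice(&self.secp_ctx, &bytes),
//         APIError::RouteError { err: "Blinding factor is invalid" });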
327
328 struct OnionKeys {
329         #[cfg(test)]
330         shared_secret: SharedSecret,
331         #[cfg(test)]
332         blinding_factor: [u8; 32],
333         ephemeral_pubkey: PublicKey,
334         rho: [u8; 32],
335         mu: [u8; 32],
336 }
337
338 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
339 pub struct ChannelDetails {
340         /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
341         /// thereafter this is the txid of the funding transaction XORed with the funding output index).
342         /// Note that this means this value is *not* persistent - it can change once during the
343         /// lifetime of the channel.
344         pub channel_id: [u8; 32],
345         /// The position of the funding transaction in the chain. None if the funding transaction has
346         /// not yet been confirmed and the channel fully opened.
347         pub short_channel_id: Option<u64>,
348         /// The node_id of our counterparty
349         pub remote_network_id: PublicKey,
350         /// The value, in satoshis, of this channel as appears in the funding output
351         pub channel_value_satoshis: u64,
352         /// The user_id passed in to create_channel, or 0 if the channel was inbound.
353         pub user_id: u64,
354 }
355
356 impl ChannelManager {
357         /// Constructs a new ChannelManager to hold several channels and route between them.
358         ///
359         /// This is the main "logic hub" for all channel-related actions, and implements
360         /// ChannelMessageHandler.
361         ///
362         /// fee_proportional_millionths is an optional fee to charge any payments routed through us.
363         /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
364         ///
365         /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
366         pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
367                 let secp_ctx = Secp256k1::new();
368
369                 let res = Arc::new(ChannelManager {
370                         genesis_hash: genesis_block(network).header.bitcoin_hash(),
371                         fee_estimator: feeest.clone(),
372                         monitor: monitor.clone(),
373                         chain_monitor,
374                         tx_broadcaster,
375
376                         announce_channels_publicly,
377                         fee_proportional_millionths,
378                         latest_block_height: AtomicUsize::new(0), //TODO: Get an init value (generally need to replay recent chain on chain_monitor registration)
379                         secp_ctx,
380
381                         channel_state: Mutex::new(ChannelHolder{
382                                 by_id: HashMap::new(),
383                                 short_to_id: HashMap::new(),
384                                 next_forward: Instant::now(),
385                                 forward_htlcs: HashMap::new(),
386                                 claimable_htlcs: HashMap::new(),
387                         }),
388                         our_network_key,
389
390                         pending_events: Mutex::new(Vec::new()),
391
392                         logger,
393                 });
394                 let weak_res = Arc::downgrade(&res);
395                 res.chain_monitor.register_listener(weak_res);
396                 Ok(res)
397         }
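        // Illustrative usage sketch (not part of the original file). `my_fee_estimator`,
        // `my_monitor`, `my_chain_watcher`, `my_broadcaster` and `my_logger` are hypothetical
        // Arc-wrapped implementations of the corresponding traits:
        // let manager = ChannelManager::new(our_network_key, 10 /* fee, in millionths */, false,
        //         Network::Testnet, my_fee_estimator, my_monitor, my_chain_watcher,
        //         my_broadcaster, my_logger).unwrap();
        // The returned Arc<ChannelManager> has already registered itself as a chain listener.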
398
399         /// Creates a new outbound channel to the given remote node and with the given value.
400         ///
401         /// user_id will be provided back as user_channel_id in FundingGenerationReady and
402         /// FundingBroadcastSafe events to allow tracking of which events correspond with which
403         /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
404         /// may wish to avoid using 0 for user_id here.
405         ///
406         /// If successful, will generate a SendOpenChannel event, so you should probably poll
407         /// PeerManager::process_events afterwards.
408         ///
409         /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or when push_msat is greater than channel_value_satoshis * 1000
410         pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
411                 let chan_keys = if cfg!(feature = "fuzztarget") {
412                         ChannelKeys {
413                                 funding_key:               SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
414                                 revocation_base_key:       SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
415                                 payment_base_key:          SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
416                                 delayed_payment_base_key:  SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
417                                 htlc_base_key:             SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
418                                 channel_close_key:         SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
419                                 channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
420                                 commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
421                         }
422                 } else {
423                         let mut key_seed = [0u8; 32];
424                         rng::fill_bytes(&mut key_seed);
425                         match ChannelKeys::new_from_seed(&key_seed) {
426                                 Ok(key) => key,
427                                 Err(_) => panic!("RNG is busted!")
428                         }
429                 };
430
431                 let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
432                 let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
433                 let mut channel_state = self.channel_state.lock().unwrap();
434                 match channel_state.by_id.entry(channel.channel_id()) {
435                         hash_map::Entry::Occupied(_) => {
436                                 if cfg!(feature = "fuzztarget") {
437                                         return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
438                                 } else {
439                                         panic!("RNG is bad???");
440                                 }
441                         },
442                         hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
443                 }
444
445                 let mut events = self.pending_events.lock().unwrap();
446                 events.push(events::Event::SendOpenChannel {
447                         node_id: their_network_key,
448                         msg: res,
449                 });
450                 Ok(())
451         }
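        // Illustrative usage sketch (not part of the original file): open a 100_000-satoshi
        // channel with nothing pushed to the peer, tagged with user_id 42 (all values arbitrary),
        // then let the peer handler drain the SendOpenChannel event generated above:
        // manager.create_channel(their_network_key, 100_000, 0, 42)?;
        // peer_manager.process_events();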
452
453         /// Gets the list of open channels, in random order. See ChannelDetails field documentation for
454         /// more information.
455         pub fn list_channels(&self) -> Vec<ChannelDetails> {
456                 let channel_state = self.channel_state.lock().unwrap();
457                 let mut res = Vec::with_capacity(channel_state.by_id.len());
458                 for (channel_id, channel) in channel_state.by_id.iter() {
459                         res.push(ChannelDetails {
460                                 channel_id: (*channel_id).clone(),
461                                 short_channel_id: channel.get_short_channel_id(),
462                                 remote_network_id: channel.get_their_node_id(),
463                                 channel_value_satoshis: channel.get_value_satoshis(),
464                                 user_id: channel.get_user_id(),
465                         });
466                 }
467                 res
468         }
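        // Illustrative usage sketch (not part of the original file): sum the value of all open
        // channels and count how many have confirmed funding transactions:
        // let channels = manager.list_channels();
        // let total_sat: u64 = channels.iter().map(|c| c.channel_value_satoshis).sum();
        // let confirmed = channels.iter().filter(|c| c.short_channel_id.is_some()).count();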
469
470         /// Gets the list of usable channels, in random order. Useful as an argument to
471         /// Router::get_route to ensure non-announced channels are used.
472         pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
473                 let channel_state = self.channel_state.lock().unwrap();
474                 let mut res = Vec::with_capacity(channel_state.by_id.len());
475                 for (channel_id, channel) in channel_state.by_id.iter() {
476                         // Note we use is_live here instead of usable which leads to somewhat confused
477                         // internal/external nomenclature, but that's OK because that's probably what the user
478                         // really wanted anyway.
479                         if channel.is_live() {
480                                 res.push(ChannelDetails {
481                                         channel_id: (*channel_id).clone(),
482                                         short_channel_id: channel.get_short_channel_id(),
483                                         remote_network_id: channel.get_their_node_id(),
484                                         channel_value_satoshis: channel.get_value_satoshis(),
485                                         user_id: channel.get_user_id(),
486                                 });
487                         }
488                 }
489                 res
490         }
491
492         /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
493         /// will be accepted on the given channel, and after additional timeout/the closing of all
494         /// pending HTLCs, the channel will be closed on chain.
495         ///
496         /// May generate a SendShutdown event on success, which should be relayed.
497         pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
498                 let (mut res, node_id, chan_option) = {
499                         let mut channel_state_lock = self.channel_state.lock().unwrap();
500                         let channel_state = channel_state_lock.borrow_parts();
501                         match channel_state.by_id.entry(channel_id.clone()) {
502                                 hash_map::Entry::Occupied(mut chan_entry) => {
503                                         let res = chan_entry.get_mut().get_shutdown()?;
504                                         if chan_entry.get().is_shutdown() {
505                                                 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
506                                                         channel_state.short_to_id.remove(&short_id);
507                                                 }
508                                                 (res, chan_entry.get().get_their_node_id(), Some(chan_entry.remove_entry().1))
509                                         } else { (res, chan_entry.get().get_their_node_id(), None) }
510                                 },
511                                 hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
512                         }
513                 };
514                 for htlc_source in res.1.drain(..) {
515                         // unknown_next_peer...I dunno who that is anymore....
516                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
517                 }
518                 let chan_update = if let Some(chan) = chan_option {
519                         if let Ok(update) = self.get_channel_update(&chan) {
520                                 Some(update)
521                         } else { None }
522                 } else { None };
523
524                 let mut events = self.pending_events.lock().unwrap();
525                 if let Some(update) = chan_update {
526                         events.push(events::Event::BroadcastChannelUpdate {
527                                 msg: update
528                         });
529                 }
530                 events.push(events::Event::SendShutdown {
531                         node_id,
532                         msg: res.0
533                 });
534
535                 Ok(())
536         }
537
538         #[inline]
539         fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>)) {
540                 let (local_txn, mut failed_htlcs) = shutdown_res;
541                 for htlc_source in failed_htlcs.drain(..) {
542                         // unknown_next_peer...I dunno who that is anymore....
543                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
544                 }
545                 for tx in local_txn {
546                         self.tx_broadcaster.broadcast_transaction(&tx);
547                 }
548                 //TODO: We need to have a way where outbound HTLC claims can result in us claiming the
549                 //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
550                 //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
551                 //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
552                 //timeouts are hit and our claims confirm).
553                 //TODO: In any case, we need to make sure we remove any pending htlc tracking (via
554                 //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
555         }
556
557         /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
558         /// the chain and rejecting new HTLCs on the given channel.
559         pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
560                 let mut chan = {
561                         let mut channel_state_lock = self.channel_state.lock().unwrap();
562                         let channel_state = channel_state_lock.borrow_parts();
563                         if let Some(chan) = channel_state.by_id.remove(channel_id) {
564                                 if let Some(short_id) = chan.get_short_channel_id() {
565                                         channel_state.short_to_id.remove(&short_id);
566                                 }
567                                 chan
568                         } else {
569                                 return;
570                         }
571                 };
572                 self.finish_force_close_channel(chan.force_shutdown());
573                 let mut events = self.pending_events.lock().unwrap();
574                 if let Ok(update) = self.get_channel_update(&chan) {
575                         events.push(events::Event::BroadcastChannelUpdate {
576                                 msg: update
577                         });
578                 }
579         }
580
581         /// Force close all channels, immediately broadcasting the latest local commitment transaction
582         /// for each to the chain and rejecting new HTLCs on each.
583         pub fn force_close_all_channels(&self) {
584                 for chan in self.list_channels() {
585                         self.force_close_channel(&chan.channel_id);
586                 }
587         }
588
589         #[inline]
590         fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) {
591                 ({
592                         let mut hmac = Hmac::new(Sha256::new(), &[0x72, 0x68, 0x6f]); // rho
593                         hmac.input(&shared_secret[..]);
594                         let mut res = [0; 32];
595                         hmac.raw_result(&mut res);
596                         res
597                 },
598                 {
599                         let mut hmac = Hmac::new(Sha256::new(), &[0x6d, 0x75]); // mu
600                         hmac.input(&shared_secret[..]);
601                         let mut res = [0; 32];
602                         hmac.raw_result(&mut res);
603                         res
604                 })
605         }
606
607         #[inline]
608         fn gen_um_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] {
609                 let mut hmac = Hmac::new(Sha256::new(), &[0x75, 0x6d]); // um
610                 hmac.input(&shared_secret[..]);
611                 let mut res = [0; 32];
612                 hmac.raw_result(&mut res);
613                 res
614         }
615
616         #[inline]
617         fn gen_ammag_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] {
618                 let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
619                 hmac.input(&shared_secret[..]);
620                 let mut res = [0; 32];
621                 hmac.raw_result(&mut res);
622                 res
623         }
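        // Illustrative sketch (not part of the original file): the three helpers above all follow
        // the same BOLT 4 pattern - HMAC-SHA256 keyed with a short ASCII label ("rho", "mu", "um",
        // "ammag") run over the per-hop ECDH shared secret. A generic version might look like:
        #[cfg(test)]
        fn gen_key_from_shared_secret(key_label: &[u8], shared_secret: &SharedSecret) -> [u8; 32] {
                let mut hmac = Hmac::new(Sha256::new(), key_label);
                hmac.input(&shared_secret[..]);
                let mut res = [0; 32];
                hmac.raw_result(&mut res);
                res
        }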
624
625         // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
626         #[inline]
627         fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(SharedSecret, [u8; 32], PublicKey, &RouteHop)> (secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey, mut callback: FType) -> Result<(), secp256k1::Error> {
628                 let mut blinded_priv = session_priv.clone();
629                 let mut blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
630
631                 for hop in route.hops.iter() {
632                         let shared_secret = SharedSecret::new(secp_ctx, &hop.pubkey, &blinded_priv);
633
634                         let mut sha = Sha256::new();
635                         sha.input(&blinded_pub.serialize()[..]);
636                         sha.input(&shared_secret[..]);
637                         let mut blinding_factor = [0u8; 32];
638                         sha.result(&mut blinding_factor);
639
640                         let ephemeral_pubkey = blinded_pub;
641
642                         blinded_priv.mul_assign(secp_ctx, &SecretKey::from_slice(secp_ctx, &blinding_factor)?)?;
643                         blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
644
645                         callback(shared_secret, blinding_factor, ephemeral_pubkey, hop);
646                 }
647
648                 Ok(())
649         }
650
651         // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
652         fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, secp256k1::Error> {
653                 let mut res = Vec::with_capacity(route.hops.len());
654
655                 Self::construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _| {
656                         let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
657
658                         res.push(OnionKeys {
659                                 #[cfg(test)]
660                                 shared_secret,
661                                 #[cfg(test)]
662                                 blinding_factor: _blinding_factor,
663                                 ephemeral_pubkey,
664                                 rho,
665                                 mu,
666                         });
667                 })?;
668
669                 Ok(res)
670         }
671
672         // Only separated out for testing, we always use realm 0
673         fn build_onion_payloads_with_realm(route: &Route, starting_htlc_offset: u32, realm: u8) -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
674                 let mut cur_value_msat = 0u64;
675                 let mut cur_cltv = starting_htlc_offset;
676                 let mut last_short_channel_id = 0;
677                 let mut res: Vec<msgs::OnionHopData> = Vec::with_capacity(route.hops.len());
678                 internal_traits::test_no_dealloc::<msgs::OnionHopData>(None);
679                 unsafe { res.set_len(route.hops.len()); }
680
681                 for (idx, hop) in route.hops.iter().enumerate().rev() {
682                         // First hop gets special values so that it can check, on receipt, that everything is
683                         // exactly as it should be (and the next hop isn't trying to probe to find out if we're
684                         // the intended recipient).
685                         let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
686                         let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
687                         res[idx] = msgs::OnionHopData {
688                                 realm: realm,
689                                 data: msgs::OnionRealm0HopData {
690                                         short_channel_id: last_short_channel_id,
691                                         amt_to_forward: value_msat,
692                                         outgoing_cltv_value: cltv,
693                                 },
694                                 hmac: [0; 32],
695                         };
696                         cur_value_msat += hop.fee_msat;
697                         if cur_value_msat >= 21000000 * 100000000 * 1000 {
698                                 return Err(APIError::RouteError{err: "Channel fees overflowed?!"});
699                         }
700                         cur_cltv += hop.cltv_expiry_delta as u32;
701                         if cur_cltv >= 500000000 {
702                                 return Err(APIError::RouteError{err: "Channel CLTV overflowed?!"});
703                         }
704                         last_short_channel_id = hop.short_channel_id;
705                 }
706                 Ok((res, cur_value_msat, cur_cltv))
707         }
708
709         #[inline]
710         fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
711                 ChannelManager::build_onion_payloads_with_realm(route, starting_htlc_offset, 0)
712         }
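        // Worked example (not part of the original file): for a hypothetical two-hop route
        // hop1 { fee_msat: 1_000, cltv_expiry_delta: 144 } -> hop2 { fee_msat: 10_000,
        // cltv_expiry_delta: 144 } (the last hop's fee_msat being the amount paid to the
        // recipient) and starting_htlc_offset = 100, the reverse loop above produces:
        //   * hop2's payload: short_channel_id 0 (signalling the final hop), amt_to_forward
        //     10_000 and outgoing_cltv_value 244, which the recipient checks on receipt;
        //   * hop1's payload: hop2's short_channel_id (telling hop1 which channel to forward
        //     over), amt_to_forward 10_000 and outgoing_cltv_value 244, i.e. exactly what hop1
        //     must forward onwards;
        //   * a return value of (payloads, 11_000, 388) - the total value and total CLTV the
        //     origin must use for the HTLC it actually offers to hop1.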
713
714         #[inline]
715         fn shift_arr_right(arr: &mut [u8; 20*65]) {
716                 unsafe {
717                         ptr::copy(arr[0..].as_ptr(), arr[65..].as_mut_ptr(), 19*65);
718                 }
719                 for i in 0..65 {
720                         arr[i] = 0;
721                 }
722         }
723
724         #[inline]
725         fn xor_bufs(dst: &mut[u8], src: &[u8]) {
726                 assert_eq!(dst.len(), src.len());
727
728                 for i in 0..dst.len() {
729                         dst[i] ^= src[i];
730                 }
731         }
732
733         const ZERO:[u8; 21*65] = [0; 21*65];
734         fn construct_onion_packet(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, associated_data: &[u8; 32]) -> msgs::OnionPacket {
735                 let mut buf = Vec::with_capacity(21*65);
736                 buf.resize(21*65, 0);
737
738                 let filler = {
739                         let iters = payloads.len() - 1;
740                         let end_len = iters * 65;
741                         let mut res = Vec::with_capacity(end_len);
742                         res.resize(end_len, 0);
743
744                         for (i, keys) in onion_keys.iter().enumerate() {
745                                 if i == payloads.len() - 1 { continue; }
746                                 let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
747                                 chacha.process(&ChannelManager::ZERO, &mut buf); // We don't have a seek function :(
748                                 ChannelManager::xor_bufs(&mut res[0..(i + 1)*65], &buf[(20 - i)*65..21*65]);
749                         }
750                         res
751                 };
752
753                 let mut packet_data = [0; 20*65];
754                 let mut hmac_res = [0; 32];
755
756                 for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
757                         ChannelManager::shift_arr_right(&mut packet_data);
758                         payload.hmac = hmac_res;
759                         packet_data[0..65].copy_from_slice(&payload.encode()[..]);
760
761                         let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
762                         chacha.process(&packet_data, &mut buf[0..20*65]);
763                         packet_data[..].copy_from_slice(&buf[0..20*65]);
764
765                         if i == 0 {
766                                 packet_data[20*65 - filler.len()..20*65].copy_from_slice(&filler[..]);
767                         }
768
769                         let mut hmac = Hmac::new(Sha256::new(), &keys.mu);
770                         hmac.input(&packet_data);
771                         hmac.input(&associated_data[..]);
772                         hmac.raw_result(&mut hmac_res);
773                 }
774
775                 msgs::OnionPacket{
776                         version: 0,
777                         public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
778                         hop_data: packet_data,
779                         hmac: hmac_res,
780                 }
781         }
782
783         /// Encrypts a failure packet. raw_packet can either be a
784         /// msgs::DecodedOnionErrorPacket.encode() result or a msgs::OnionErrorPacket.data element.
785         fn encrypt_failure_packet(shared_secret: &SharedSecret, raw_packet: &[u8]) -> msgs::OnionErrorPacket {
786                 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
787
788                 let mut packet_crypted = Vec::with_capacity(raw_packet.len());
789                 packet_crypted.resize(raw_packet.len(), 0);
790                 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
791                 chacha.process(&raw_packet, &mut packet_crypted[..]);
792                 msgs::OnionErrorPacket {
793                         data: packet_crypted,
794                 }
795         }
796
797         fn build_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket {
798                 assert!(failure_data.len() <= 256 - 2);
799
800                 let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
801
802                 let failuremsg = {
803                         let mut res = Vec::with_capacity(2 + failure_data.len());
804                         res.push(((failure_type >> 8) & 0xff) as u8);
805                         res.push(((failure_type >> 0) & 0xff) as u8);
806                         res.extend_from_slice(&failure_data[..]);
807                         res
808                 };
809                 let pad = {
810                         let mut res = Vec::with_capacity(256 - 2 - failure_data.len());
811                         res.resize(256 - 2 - failure_data.len(), 0);
812                         res
813                 };
814                 let mut packet = msgs::DecodedOnionErrorPacket {
815                         hmac: [0; 32],
816                         failuremsg: failuremsg,
817                         pad: pad,
818                 };
819
820                 let mut hmac = Hmac::new(Sha256::new(), &um);
821                 hmac.input(&packet.encode()[32..]);
822                 hmac.raw_result(&mut packet.hmac);
823
824                 packet
825         }
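        // Worked example (not part of the original file): for failure_type 0x4000 | 10
        // (unknown_next_peer) with empty failure_data, failuremsg is the two bytes [0x40, 0x0a],
        // pad is 254 zero bytes (failuremsg plus pad is always 256 bytes), and hmac is
        // HMAC-SHA256 keyed with the "um" key over the serialized packet minus its own hmac
        // field (encode()[32..]). The origin node, which knows every hop's shared secret, can
        // later check this hmac to identify which hop generated the failure.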
826
827         #[inline]
828         fn build_first_hop_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
829                 let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data);
830                 ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
831         }
832
833         fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
834                 macro_rules! get_onion_hash {
835                         () => {
836                                 {
837                                         let mut sha = Sha256::new();
838                                         sha.input(&msg.onion_routing_packet.hop_data);
839                                         let mut onion_hash = [0; 32];
840                                         sha.result(&mut onion_hash);
841                                         onion_hash
842                                 }
843                         }
844                 }
845
846                 if let Err(_) = msg.onion_routing_packet.public_key {
847                         log_info!(self, "Failed to accept/forward incoming HTLC with invalid ephemeral pubkey");
848                         return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
849                                 channel_id: msg.channel_id,
850                                 htlc_id: msg.htlc_id,
851                                 sha256_of_onion: get_onion_hash!(),
852                                 failure_code: 0x8000 | 0x4000 | 6,
853                         })), self.channel_state.lock().unwrap());
854                 }
855
856                 let shared_secret = SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key);
857                 let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
858
859                 let mut channel_state = None;
860                 macro_rules! return_err {
861                         ($msg: expr, $err_code: expr, $data: expr) => {
862                                 {
863                                         log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
864                                         if channel_state.is_none() {
865                                                 channel_state = Some(self.channel_state.lock().unwrap());
866                                         }
867                                         return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
868                                                 channel_id: msg.channel_id,
869                                                 htlc_id: msg.htlc_id,
870                                                 reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
871                                         })), channel_state.unwrap());
872                                 }
873                         }
874                 }
875
876                 if msg.onion_routing_packet.version != 0 {
877                         //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
878                         //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
879                         //the hash doesn't really serve any purpose - in the case of hashing all data, the
880                         //receiving node would have to brute force to figure out which version was put in the
881                         //packet by the node that sent us the message, in the case of hashing the hop_data, the
882                         //node knows the HMAC matched, so they already know what is there...
883                         return_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4, &get_onion_hash!());
884                 }
885
886                 let mut hmac = Hmac::new(Sha256::new(), &mu);
887                 hmac.input(&msg.onion_routing_packet.hop_data);
888                 hmac.input(&msg.payment_hash);
889                 if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) {
890                         return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!());
891                 }
892
893                 let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
894                 let next_hop_data = {
895                         let mut decoded = [0; 65];
896                         chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
897                         match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
898                                 Err(err) => {
899                                         let error_code = match err {
900                                                 msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
901                                                 _ => 0x2000 | 2, // Should never happen
902                                         };
903                                         return_err!("Unable to decode our hop data", error_code, &[0;0]);
904                                 },
905                                 Ok(msg) => msg
906                         }
907                 };
908
909                 let pending_forward_info = if next_hop_data.hmac == [0; 32] {
910                                 // OUR PAYMENT!
911                                 // final_expiry_too_soon
912                                 if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
913                                         return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
914                                 }
915                                 // final_incorrect_htlc_amount
916                                 if next_hop_data.data.amt_to_forward > msg.amount_msat {
917                                         return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
918                                 }
919                                 // final_incorrect_cltv_expiry
920                                 if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
921                                         return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
922                                 }
923
924                                 // Note that we could obviously respond immediately with an update_fulfill_htlc
925                                 // message, however that would leak that we are the recipient of this payment, so
926                                 // instead we stay symmetric with the forwarding case, only responding (after a
927                                 // delay) once they've sent us a commitment_signed!
928
929                                 PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
930                                         onion_packet: None,
931                                         payment_hash: msg.payment_hash.clone(),
932                                         short_channel_id: 0,
933                                         incoming_shared_secret: shared_secret.clone(),
934                                         amt_to_forward: next_hop_data.data.amt_to_forward,
935                                         outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
936                                 })
937                         } else {
938                                 let mut new_packet_data = [0; 20*65];
939                                 chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
940                                 chacha.process(&ChannelManager::ZERO[0..65], &mut new_packet_data[19*65..]);
941
942                                 let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
943
944                                 let blinding_factor = {
945                                         let mut sha = Sha256::new();
946                                         sha.input(&new_pubkey.serialize()[..]);
947                                         sha.input(&shared_secret[..]);
948                                         let mut res = [0u8; 32];
949                                         sha.result(&mut res);
950                                         match SecretKey::from_slice(&self.secp_ctx, &res) {
951                                                 Err(_) => {
952                                                         return_err!("Blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
953                                                 },
954                                                 Ok(key) => key
955                                         }
956                                 };
957
958                                 if let Err(_) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
959                                         return_err!("New blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
960                                 }
961
962                                 let outgoing_packet = msgs::OnionPacket {
963                                         version: 0,
964                                         public_key: Ok(new_pubkey),
965                                         hop_data: new_packet_data,
966                                         hmac: next_hop_data.hmac.clone(),
967                                 };
968
969                                 PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
970                                         onion_packet: Some(outgoing_packet),
971                                         payment_hash: msg.payment_hash.clone(),
972                                         short_channel_id: next_hop_data.data.short_channel_id,
973                                         incoming_shared_secret: shared_secret.clone(),
974                                         amt_to_forward: next_hop_data.data.amt_to_forward,
975                                         outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
976                                 })
977                         };
978
979                 channel_state = Some(self.channel_state.lock().unwrap());
980                 if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
981                         if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
982                                 let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
983                                 let forwarding_id = match id_option {
984                                         None => { // unknown_next_peer
985                                                 return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
986                                         },
987                                         Some(id) => id.clone(),
988                                 };
989                                 if let Some((err, code, chan_update)) = loop {
990                                         let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
991
992                                         if !chan.is_live() { // channel_disabled
993                                                 break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
994                                         }
995                                         if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
996                                                 break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
997                                         }
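                                        // The required forwarding fee is a proportional part (in millionths of the
                                        // forwarded amount) plus our base fee. Worked example with hypothetical
                                        // values: with fee_proportional_millionths = 100 and a base fee of 1_000
                                        // msat, forwarding amt_to_forward = 1_000_000 msat requires a fee of
                                        // 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat, so msg.amount_msat must
                                        // be at least 1_001_100 msat.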
998                                         let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
999                                         if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
1000                                                 break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
1001                                         }
1002                                         if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
1003                                                 break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
1004                                         }
1005                                         let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
1006                                         // We want at least HTLC_FAIL_TIMEOUT_BLOCKS in which to fail the HTLC backwards off-chain before we are forced to go on chain, which we must do CLTV_CLAIM_BUFFER blocks before expiration
1007                                         if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
1008                                                 break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
1009                                         }
1010                                         if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
1011                                                 break Some(("CLTV expiry is too far in the future", 21, None));
1012                                         }
1013                                         break None;
1014                                 }
1015                                 {
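                                        // BOLT 4 failure codes combine flag bits (0x8000 BADONION, 0x4000 PERM,
                                        // 0x2000 NODE, 0x1000 UPDATE) with a type number. Codes carrying the UPDATE
                                        // flag are expected to include an encoded channel_update in their data, which
                                        // is why the relevant amount/expiry fields and the update itself are appended
                                        // to res below.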
1016                                         let mut res = Vec::with_capacity(8 + 128);
1017                                         if code == 0x1000 | 11 || code == 0x1000 | 12 {
1018                                                 res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
1019                                         }
1020                                         else if code == 0x1000 | 13 {
1021                                                 res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
1022                                         }
1023                                         if let Some(chan_update) = chan_update {
1024                                                 res.extend_from_slice(&chan_update.encode_with_len()[..]);
1025                                         }
1026                                         return_err!(err, code, &res[..]);
1027                                 }
1028                         }
1029                 }
1030
1031                 (pending_forward_info, channel_state.unwrap())
1032         }
1033
1034         /// Only fails if the channel does not yet have an assigned short_channel_id.
1035         fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
1036                 let short_channel_id = match chan.get_short_channel_id() {
1037                         None => return Err(HandleError{err: "Channel not yet established", action: None}),
1038                         Some(id) => id,
1039                 };
1040
1041                 let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
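                // Per BOLT 7, bit 0 of the channel_update flags indicates which of the two channel
                // nodes this update is from (node_1 being the lexicographically-lesser node_id), and
                // bit 1 indicates the channel is currently disabled.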
1042
1043                 let unsigned = msgs::UnsignedChannelUpdate {
1044                         chain_hash: self.genesis_hash,
1045                         short_channel_id: short_channel_id,
1046                         timestamp: chan.get_channel_update_count(),
1047                         flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
1048                         cltv_expiry_delta: CLTV_EXPIRY_DELTA,
1049                         htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
1050                         fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
1051                         fee_proportional_millionths: self.fee_proportional_millionths,
1052                         excess_data: Vec::new(),
1053                 };
1054
1055                 let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
1056                 let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
1057
1058                 Ok(msgs::ChannelUpdate {
1059                         signature: sig,
1060                         contents: unsigned
1061                 })
1062         }
1063
1064         /// Sends a payment along a given route.
1065         ///
1066         /// Value parameters are provided via the last hop in route; see the documentation for RouteHop
1067         /// fields for more info.
1068         ///
1069         /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
1070         /// payment), we don't do anything to stop you! We always try to ensure that if the provided
1071         /// next hop knows the preimage to payment_hash they can claim an additional amount as
1072         /// specified in the last hop in the route! Thus, you should probably do your own
1073         /// payment_preimage tracking (which you should already be doing as they represent "proof of
1074         /// payment") and prevent double-sends yourself.
1075         ///
1076         /// May generate an UpdateHTLCs message event on success, which should be relayed.
1077         ///
1078         /// Raises APIError::RouteError when an invalid route or forwarding parameter
1079         /// (cltv_delta, fee, node public key) is specified.
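        ///
        /// A minimal usage sketch, assuming `channel_manager` is this ChannelManager, `route` was
        /// obtained from ln::router, and `payment_hash` is the SHA256 hash of a preimage known to
        /// the recipient: `channel_manager.send_payment(route, payment_hash)?;`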
1080         pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
1081                 if route.hops.len() < 1 || route.hops.len() > 20 {
1082                         return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
1083                 }
1084                 let our_node_id = self.get_our_node_id();
1085                 for (idx, hop) in route.hops.iter().enumerate() {
1086                         if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
1087                                 return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
1088                         }
1089                 }
1090
1091                 let session_priv = if cfg!(test) {
1092                         SecretKey::from_slice(&self.secp_ctx, &[3; 32]).unwrap()
1093                 } else {
1094                         SecretKey::from_slice(&self.secp_ctx, &{
1095                                 let mut session_key = [0; 32];
1096                                 rng::fill_bytes(&mut session_key);
1097                                 session_key
1098                         }).expect("RNG is bad!")
1099                 };
1100
1101                 let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
1102
1103                 let onion_keys = secp_call!(ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
1104                                 APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
1105                 let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
1106                 let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
1107
1108                 let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
1109                         let mut channel_state_lock = self.channel_state.lock().unwrap();
1110                         let channel_state = channel_state_lock.borrow_parts();
1111
1112                         let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
1113                                 None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
1114                                 Some(id) => id.clone(),
1115                         };
1116
1117                         let res = {
1118                                 let chan = channel_state.by_id.get_mut(&id).unwrap();
1119                                 if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
1120                                         return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
1121                                 }
1122                                 if !chan.is_live() {
1123                                         return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
1124                                 }
1125                                 chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
1126                                         route: route.clone(),
1127                                         session_priv: session_priv.clone(),
1128                                         first_hop_htlc_msat: htlc_msat,
1129                                 }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
1130                         };
1131
1132                         let first_hop_node_id = route.hops.first().unwrap().pubkey;
1133
1134                         match res {
1135                                 Some(msgs) => (first_hop_node_id, msgs),
1136                                 None => return Ok(()),
1137                         }
1138                 };
1139
1140                 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1141                         unimplemented!();
1142                 }
1143
1144                 let mut events = self.pending_events.lock().unwrap();
1145                 events.push(events::Event::UpdateHTLCs {
1146                         node_id: first_hop_node_id,
1147                         updates: msgs::CommitmentUpdate {
1148                                 update_add_htlcs: vec![update_add],
1149                                 update_fulfill_htlcs: Vec::new(),
1150                                 update_fail_htlcs: Vec::new(),
1151                                 update_fail_malformed_htlcs: Vec::new(),
1152                                 update_fee: None,
1153                                 commitment_signed,
1154                         },
1155                 });
1156                 Ok(())
1157         }
1158
1159         /// Call this upon creation of a funding transaction for the given channel.
1160         ///
1161         /// Panics if a funding transaction has already been provided for this channel.
1162         ///
1163         /// May panic if the funding_txo is duplicative with some other channel (note that this should
1164         /// be trivially prevented by using unique funding transaction keys per-channel).
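        ///
        /// A usage sketch: this is typically called in response to an Event::FundingGenerationReady,
        /// after building a transaction which pays to that event's output_script, with funding_txo
        /// pointing at the relevant output of that transaction:
        /// `channel_manager.funding_transaction_generated(&temporary_channel_id, funding_txo);`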
1165         pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
1166                 macro_rules! add_pending_event {
1167                         ($event: expr) => {
1168                                 {
1169                                         let mut pending_events = self.pending_events.lock().unwrap();
1170                                         pending_events.push($event);
1171                                 }
1172                         }
1173                 }
1174
1175                 let (chan, msg, chan_monitor) = {
1176                         let mut channel_state = self.channel_state.lock().unwrap();
1177                         match channel_state.by_id.remove(temporary_channel_id) {
1178                                 Some(mut chan) => {
1179                                         match chan.get_outbound_funding_created(funding_txo) {
1180                                                 Ok(funding_msg) => {
1181                                                         (chan, funding_msg.0, funding_msg.1)
1182                                                 },
1183                                                 Err(e) => {
1184                                                         log_error!(self, "Got bad signatures: {}!", e.err);
1185                                                         mem::drop(channel_state);
1186                                                         add_pending_event!(events::Event::HandleError {
1187                                                                 node_id: chan.get_their_node_id(),
1188                                                                 action: e.action,
1189                                                         });
1190                                                         return;
1191                                                 },
1192                                         }
1193                                 },
1194                                 None => return
1195                         }
1196                 }; // Release channel lock for install_watch_outpoint call.
1197                 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1198                         unimplemented!();
1199                 }
1200                 add_pending_event!(events::Event::SendFundingCreated {
1201                         node_id: chan.get_their_node_id(),
1202                         msg: msg,
1203                 });
1204
1205                 let mut channel_state = self.channel_state.lock().unwrap();
1206                 match channel_state.by_id.entry(chan.channel_id()) {
1207                         hash_map::Entry::Occupied(_) => {
1208                                 panic!("Generated duplicate funding txid?");
1209                         },
1210                         hash_map::Entry::Vacant(e) => {
1211                                 e.insert(chan);
1212                         }
1213                 }
1214         }
1215
1216         fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
1217                 if !chan.should_announce() { return None }
1218
1219                 let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
1220                         Ok(res) => res,
1221                         Err(_) => return None, // Only in case of state precondition violations eg channel is closing
1222                 };
1223                 let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
1224                 let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
1225
1226                 Some(msgs::AnnouncementSignatures {
1227                         channel_id: chan.channel_id(),
1228                         short_channel_id: chan.get_short_channel_id().unwrap(),
1229                         node_signature: our_node_sig,
1230                         bitcoin_signature: our_bitcoin_sig,
1231                 })
1232         }
1233
1234         /// Processes HTLCs which are pending forwarding, once the random forward delay has elapsed.
1235         ///
1236         /// Should only really ever be called in response to a PendingHTLCsForwardable event.
1237         /// Will likely generate further events.
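        ///
        /// A usage sketch, assuming `channel_manager` is this ChannelManager and a
        /// PendingHTLCsForwardable event was just received (ideally after waiting the delay it
        /// indicates): `channel_manager.process_pending_htlc_forwards();`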
1238         pub fn process_pending_htlc_forwards(&self) {
1239                 let mut new_events = Vec::new();
1240                 let mut failed_forwards = Vec::new();
1241                 {
1242                         let mut channel_state_lock = self.channel_state.lock().unwrap();
1243                         let channel_state = channel_state_lock.borrow_parts();
1244
1245                         if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
1246                                 return;
1247                         }
1248
1249                         for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
1250                                 if short_chan_id != 0 {
1251                                         let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
1252                                                 Some(chan_id) => chan_id.clone(),
1253                                                 None => {
1254                                                         failed_forwards.reserve(pending_forwards.len());
1255                                                         for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1256                                                                 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
1257                                                                         short_channel_id: prev_short_channel_id,
1258                                                                         htlc_id: prev_htlc_id,
1259                                                                         incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1260                                                                 });
1261                                                                 failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
1262                                                         }
1263                                                         continue;
1264                                                 }
1265                                         };
1266                                         let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
1267
1268                                         let mut add_htlc_msgs = Vec::new();
1269                                         for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1270                                                 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
1271                                                         short_channel_id: prev_short_channel_id,
1272                                                         htlc_id: prev_htlc_id,
1273                                                         incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1274                                                 });
1275                                                 match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
1276                                                         Err(_e) => {
1277                                                                 let chan_update = self.get_channel_update(forward_chan).unwrap();
1278                                                                 failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
1279                                                                 continue;
1280                                                         },
1281                                                         Ok(update_add) => {
1282                                                                 match update_add {
1283                                                                         Some(msg) => { add_htlc_msgs.push(msg); },
1284                                                                         None => {
1285                                                                                 // Nothing to do here...we're waiting on a remote
1286                                                                                 // revoke_and_ack before we can add any more HTLCs. The Channel
1287                                                                                 // will automatically handle building the update_add_htlc and
1288                                                                                 // commitment_signed messages when we can.
1289                                                                                 // TODO: Do some kind of timer to set the channel as !is_live()
1290                                                                                 // as we don't really want others relying on us relaying through
1291                                                                                 // this channel currently :/.
1292                                                                         }
1293                                                                 }
1294                                                         }
1295                                                 }
1296                                         }
1297
1298                                         if !add_htlc_msgs.is_empty() {
1299                                                 let (commitment_msg, monitor) = match forward_chan.send_commitment() {
1300                                                         Ok(res) => res,
1301                                                         Err(e) => {
1302                                                                 if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action {
1303                                                                 } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action {
1304                                                                 } else {
1305                                                                         panic!("Stated return value requirements in send_commitment() were not met");
1306                                                                 }
1307                                                                 //TODO: Handle...this is bad!
1308                                                                 continue;
1309                                                         },
1310                                                 };
1311                                                 new_events.push((Some(monitor), events::Event::UpdateHTLCs {
1312                                                         node_id: forward_chan.get_their_node_id(),
1313                                                         updates: msgs::CommitmentUpdate {
1314                                                                 update_add_htlcs: add_htlc_msgs,
1315                                                                 update_fulfill_htlcs: Vec::new(),
1316                                                                 update_fail_htlcs: Vec::new(),
1317                                                                 update_fail_malformed_htlcs: Vec::new(),
1318                                                                 update_fee: None,
1319                                                                 commitment_signed: commitment_msg,
1320                                                         },
1321                                                 }));
1322                                         }
1323                                 } else {
1324                                         for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1325                                                 let prev_hop_data = HTLCPreviousHopData {
1326                                                         short_channel_id: prev_short_channel_id,
1327                                                         htlc_id: prev_htlc_id,
1328                                                         incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1329                                                 };
1330                                                 match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
1331                                                         hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
1332                                                         hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
1333                                                 };
1334                                                 new_events.push((None, events::Event::PaymentReceived {
1335                                                         payment_hash: forward_info.payment_hash,
1336                                                         amt: forward_info.amt_to_forward,
1337                                                 }));
1338                                         }
1339                                 }
1340                         }
1341                 }
1342
1343                 for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
1344                         match update {
1345                                 None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
1346                                 Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
1347                         };
1348                 }
1349
1350                 if new_events.is_empty() { return }
1351
1352                 new_events.retain(|event| {
1353                         if let &Some(ref monitor) = &event.0 {
1354                                 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) {
1355                                         unimplemented!(); // but definitely don't push the event...
1356                                 }
1357                         }
1358                         true
1359                 });
1360
1361                 let mut events = self.pending_events.lock().unwrap();
1362                 events.reserve(new_events.len());
1363                 for event in new_events.drain(..) {
1364                         events.push(event.1);
1365                 }
1366         }
1367
1368         /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
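        ///
        /// A usage sketch, e.g. after a PaymentReceived event for a payment_hash whose preimage we
        /// do not know: `channel_manager.fail_htlc_backwards(&payment_hash);`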
1369         pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
1370                 // TODO: Add ability to return 0x4000|16 (incorrect_payment_amount) if the amount we
1371                 // received is < expected or > 2*expected
1372                 let mut channel_state = Some(self.channel_state.lock().unwrap());
1373                 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
1374                 if let Some(mut sources) = removed_source {
1375                         for htlc_with_hash in sources.drain(..) {
1376                                 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1377                                 self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() });
1378                         }
1379                         true
1380                 } else { false }
1381         }
1382
1383         /// Fails an HTLC backwards to the node which sent it to us.
1384         /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
1385         /// There are several callsites that do stupid things like loop over a list of payment_hashes
1386         /// to fail and take the channel_state lock for each iteration (as we take ownership and may
1387         /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
1388         /// still-available channels.
1389         fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) {
1390                 match source {
1391                         HTLCSource::OutboundRoute { .. } => {
1392                                 mem::drop(channel_state);
1393                                 if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
1394                                         let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
1395                                         let mut pending_events = self.pending_events.lock().unwrap();
1396                                         if let Some(channel_update) = channel_update {
1397                                                 pending_events.push(events::Event::PaymentFailureNetworkUpdate {
1398                                                         update: channel_update,
1399                                                 });
1400                                         }
1401                                         pending_events.push(events::Event::PaymentFailed {
1402                                                 payment_hash: payment_hash.clone(),
1403                                                 rejected_by_dest: !payment_retryable,
1404                                         });
1405                                 } else {
1406                                         panic!("should have onion error packet here");
1407                                 }
1408                         },
1409                         HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
1410                                 let err_packet = match onion_error {
1411                                         HTLCFailReason::Reason { failure_code, data } => {
1412                                                 let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
1413                                                 ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
1414                                         },
1415                                         HTLCFailReason::ErrorPacket { err } => {
1416                                                 ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
1417                                         }
1418                                 };
1419
1420                                 let (node_id, fail_msgs) = {
1421                                         let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1422                                                 Some(chan_id) => chan_id.clone(),
1423                                                 None => return
1424                                         };
1425
1426                                         let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1427                                         match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
1428                                                 Ok(msg) => (chan.get_their_node_id(), msg),
1429                                                 Err(_e) => {
1430                                                         //TODO: Do something with e?
1431                                                         return;
1432                                                 },
1433                                         }
1434                                 };
1435
1436                                 match fail_msgs {
1437                                         Some((msg, commitment_msg, chan_monitor)) => {
1438                                                 mem::drop(channel_state);
1439
1440                                                 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1441                                                         unimplemented!(); // but definitely don't push the event...
1442                                                 }
1443
1444                                                 let mut pending_events = self.pending_events.lock().unwrap();
1445                                                 pending_events.push(events::Event::UpdateHTLCs {
1446                                                         node_id,
1447                                                         updates: msgs::CommitmentUpdate {
1448                                                                 update_add_htlcs: Vec::new(),
1449                                                                 update_fulfill_htlcs: Vec::new(),
1450                                                                 update_fail_htlcs: vec![msg],
1451                                                                 update_fail_malformed_htlcs: Vec::new(),
1452                                                                 update_fee: None,
1453                                                                 commitment_signed: commitment_msg,
1454                                                         },
1455                                                 });
1456                                         },
1457                                         None => {},
1458                                 }
1459                         },
1460                 }
1461         }
1462
1463         /// Provides a payment preimage in response to a PaymentReceived event, returning true and
1464         /// generating message events for the net layer to claim the payment, if possible. Thus, you
1465         /// should probably kick the net layer to go send messages if this returns true!
1466         ///
1467         /// May panic if called except in response to a PaymentReceived event.
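        ///
        /// A usage sketch, after a PaymentReceived event for the SHA256 hash of `payment_preimage`
        /// (remember to kick the net layer if this returns true):
        /// `let claimed = channel_manager.claim_funds(payment_preimage);`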
1468         pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
1469                 let mut sha = Sha256::new();
1470                 sha.input(&payment_preimage);
1471                 let mut payment_hash = [0; 32];
1472                 sha.result(&mut payment_hash);
1473
1474                 let mut channel_state = Some(self.channel_state.lock().unwrap());
1475                 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
1476                 if let Some(mut sources) = removed_source {
1477                         for htlc_with_hash in sources.drain(..) {
1478                                 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1479                                 self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
1480                         }
1481                         true
1482                 } else { false }
1483         }
1484         fn claim_funds_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: [u8; 32]) {
1485                 match source {
1486                         HTLCSource::OutboundRoute { .. } => {
1487                                 mem::drop(channel_state);
1488                                 let mut pending_events = self.pending_events.lock().unwrap();
1489                                 pending_events.push(events::Event::PaymentSent {
1490                                         payment_preimage
1491                                 });
1492                         },
1493                         HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
1494                                 //TODO: Delay the claimed_funds relaying just like we do outbound relay!
1495                                 let (node_id, fulfill_msgs) = {
1496                                         let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1497                                                 Some(chan_id) => chan_id.clone(),
1498                                                 None => {
1499                                                         // TODO: There is probably a channel manager somewhere that needs to
1500                                                         // learn the preimage as the channel already hit the chain and that's
1501                                                         // why its missing.
1502                                                         return
1503                                                 }
1504                                         };
1505
1506                                         let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1507                                         match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
1508                                                 Ok(msg) => (chan.get_their_node_id(), msg),
1509                                                 Err(_e) => {
1510                                                         // TODO: There is probably a channel manager somewhere that needs to
1511                                                         // learn the preimage as the channel may be about to hit the chain.
1512                                                         //TODO: Do something with e?
1513                                                         return
1514                                                 },
1515                                         }
1516                                 };
1517
1518                                 mem::drop(channel_state);
1519                                 if let Some(chan_monitor) = fulfill_msgs.1 {
1520                                         if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1521                                                 unimplemented!(); // but definitely don't push the event...
1522                                         }
1523                                 }
1524
1525                                 if let Some((msg, commitment_msg)) = fulfill_msgs.0 {
1526                                         let mut pending_events = self.pending_events.lock().unwrap();
1527                                         pending_events.push(events::Event::UpdateHTLCs {
1528                                                 node_id: node_id,
1529                                                 updates: msgs::CommitmentUpdate {
1530                                                         update_add_htlcs: Vec::new(),
1531                                                         update_fulfill_htlcs: vec![msg],
1532                                                         update_fail_htlcs: Vec::new(),
1533                                                         update_fail_malformed_htlcs: Vec::new(),
1534                                                         update_fee: None,
1535                                                         commitment_signed: commitment_msg,
1536                                                 }
1537                                         });
1538                                 }
1539                         },
1540                 }
1541         }
1542
1543         /// Gets the node_id held by this ChannelManager
1544         pub fn get_our_node_id(&self) -> PublicKey {
1545                 PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
1546         }
1547
1548         /// Used to restore channels to normal operation after a
1549         /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
1550         /// operation.
1551         pub fn test_restore_channel_monitor(&self) {
1552                 unimplemented!();
1553         }
1554
1555         fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, MsgHandleErrInternal> {
1556                 if msg.chain_hash != self.genesis_hash {
1557                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
1558                 }
1559                 let mut channel_state = self.channel_state.lock().unwrap();
1560                 if channel_state.by_id.contains_key(&msg.temporary_channel_id) {
1561                         return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone()));
1562                 }
1563
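                // When fuzzing, use fixed, deterministic channel keys so runs are reproducible;
                // otherwise derive fresh keys from the RNG.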
1564                 let chan_keys = if cfg!(feature = "fuzztarget") {
1565                         ChannelKeys {
1566                                 funding_key:               SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]).unwrap(),
1567                                 revocation_base_key:       SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0]).unwrap(),
1568                                 payment_base_key:          SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0]).unwrap(),
1569                                 delayed_payment_base_key:  SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0]).unwrap(),
1570                                 htlc_base_key:             SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0]).unwrap(),
1571                                 channel_close_key:         SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0]).unwrap(),
1572                                 channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0]).unwrap(),
1573                                 commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
1574                         }
1575                 } else {
1576                         let mut key_seed = [0u8; 32];
1577                         rng::fill_bytes(&mut key_seed);
1578                         match ChannelKeys::new_from_seed(&key_seed) {
1579                                 Ok(key) => key,
1580                                 Err(_) => panic!("RNG is busted!")
1581                         }
1582                 };
1583
1584                 let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
1585                         .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
1586                 let accept_msg = channel.get_accept_channel();
1587                 channel_state.by_id.insert(channel.channel_id(), channel);
1588                 Ok(accept_msg)
1589         }
1590
1591         fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
1592                 let (value, output_script, user_id) = {
1593                         let mut channel_state = self.channel_state.lock().unwrap();
1594                         match channel_state.by_id.get_mut(&msg.temporary_channel_id) {
1595                                 Some(chan) => {
1596                                         if chan.get_their_node_id() != *their_node_id {
1597                                                 //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
1598                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1599                                         }
1600                                         chan.accept_channel(&msg)
1601                                                 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
1602                                         (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
1603                                 },
1604                                 //TODO: same as above
1605                                 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1606                         }
1607                 };
1608                 let mut pending_events = self.pending_events.lock().unwrap();
1609                 pending_events.push(events::Event::FundingGenerationReady {
1610                         temporary_channel_id: msg.temporary_channel_id,
1611                         channel_value_satoshis: value,
1612                         output_script: output_script,
1613                         user_channel_id: user_id,
1614                 });
1615                 Ok(())
1616         }
1617
1618         fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, MsgHandleErrInternal> {
1619                 let (chan, funding_msg, monitor_update) = {
1620                         let mut channel_state = self.channel_state.lock().unwrap();
1621                         match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
1622                                 hash_map::Entry::Occupied(mut chan) => {
1623                                         if chan.get().get_their_node_id() != *their_node_id {
1624                                                 //TODO: here and below MsgHandleErrInternal, #153 case
1625                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1626                                         }
1627                                         match chan.get_mut().funding_created(msg) {
1628                                                 Ok((funding_msg, monitor_update)) => {
1629                                                         (chan.remove(), funding_msg, monitor_update)
1630                                                 },
1631                                                 Err(e) => {
1632                                                         return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1633                                                 }
1634                                         }
1635                                 },
1636                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1637                         }
1638                 }; // Release channel lock for install_watch_outpoint call,
1639                    // note that this means if the remote end is misbehaving and sends a message for the same
1640                    // channel back-to-back with funding_created, we'll end up thinking they sent a message
1641                    // for a bogus channel.
1642                 if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
1643                         unimplemented!();
1644                 }
1645                 let mut channel_state = self.channel_state.lock().unwrap();
1646                 match channel_state.by_id.entry(funding_msg.channel_id) {
1647                         hash_map::Entry::Occupied(_) => {
1648                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
1649                         },
1650                         hash_map::Entry::Vacant(e) => {
1651                                 e.insert(chan);
1652                         }
1653                 }
1654                 Ok(funding_msg)
1655         }
1656
1657         fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
1658                 let (funding_txo, user_id, monitor) = {
1659                         let mut channel_state = self.channel_state.lock().unwrap();
1660                         match channel_state.by_id.get_mut(&msg.channel_id) {
1661                                 Some(chan) => {
1662                                         if chan.get_their_node_id() != *their_node_id {
1663                                                 //TODO: here and below MsgHandleErrInternal, #153 case
1664                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1665                                         }
1666                                         let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1667                                         (chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
1668                                 },
1669                                 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1670                         }
1671                 };
1672                 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
1673                         unimplemented!();
1674                 }
1675                 let mut pending_events = self.pending_events.lock().unwrap();
1676                 pending_events.push(events::Event::FundingBroadcastSafe {
1677                         funding_txo: funding_txo,
1678                         user_channel_id: user_id,
1679                 });
1680                 Ok(())
1681         }
1682
1683         fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, MsgHandleErrInternal> {
1684                 let mut channel_state = self.channel_state.lock().unwrap();
1685                 match channel_state.by_id.get_mut(&msg.channel_id) {
1686                         Some(chan) => {
1687                                 if chan.get_their_node_id() != *their_node_id {
1688                                         //TODO: here and below MsgHandleErrInternal, #153 case
1689                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1690                                 }
1691                                 chan.funding_locked(&msg)
1692                                         .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1693                                 return Ok(self.get_announcement_sigs(chan));
1694                         },
1695                         None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1696                 };
1697         }
1698
1699         fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), MsgHandleErrInternal> {
1700                 let (mut res, chan_option) = {
1701                         let mut channel_state_lock = self.channel_state.lock().unwrap();
1702                         let channel_state = channel_state_lock.borrow_parts();
1703
1704                         match channel_state.by_id.entry(msg.channel_id.clone()) {
1705                                 hash_map::Entry::Occupied(mut chan_entry) => {
1706                                         if chan_entry.get().get_their_node_id() != *their_node_id {
1707                                                 //TODO: here and below MsgHandleErrInternal, #153 case
1708                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1709                                         }
1710                                         let res = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1711                                         if chan_entry.get().is_shutdown() {
1712                                                 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1713                                                         channel_state.short_to_id.remove(&short_id);
1714                                                 }
1715                                                 (res, Some(chan_entry.remove_entry().1))
1716                                         } else { (res, None) }
1717                                 },
1718                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1719                         }
1720                 };
1721                 for htlc_source in res.2.drain(..) {
1722                         // unknown_next_peer - the channel is going away, so we no longer have a next peer to forward these HTLCs to.
1723                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
1724                 }
1725                 if let Some(chan) = chan_option {
1726                         if let Ok(update) = self.get_channel_update(&chan) {
1727                                 let mut events = self.pending_events.lock().unwrap();
1728                                 events.push(events::Event::BroadcastChannelUpdate {
1729                                         msg: update
1730                                 });
1731                         }
1732                 }
1733                 Ok((res.0, res.1))
1734         }
1735
1736         fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, MsgHandleErrInternal> {
1737                 let (res, chan_option) = {
1738                         let mut channel_state_lock = self.channel_state.lock().unwrap();
1739                         let channel_state = channel_state_lock.borrow_parts();
1740                         match channel_state.by_id.entry(msg.channel_id.clone()) {
1741                                 hash_map::Entry::Occupied(mut chan_entry) => {
1742                                         if chan_entry.get().get_their_node_id() != *their_node_id {
1743                                                 //TODO: here and below MsgHandleErrInternal, #153 case
1744                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1745                                         }
1746                                         let res = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1747                                         if res.1.is_some() {
1748                                                 // We're done with this channel, we've got a signed closing transaction and
1749                                                 // will send the closing_signed back to the remote peer upon return. This
1750                                                 // also implies there are no pending HTLCs left on the channel, so we can
1751                                                 // fully delete it from tracking (the channel monitor is still around to
1752                                                 // watch for old state broadcasts)!
1753                                                 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1754                                                         channel_state.short_to_id.remove(&short_id);
1755                                                 }
1756                                                 (res, Some(chan_entry.remove_entry().1))
1757                                         } else { (res, None) }
1758                                 },
1759                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1760                         }
1761                 };
1762                 if let Some(broadcast_tx) = res.1 {
1763                         self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
1764                 }
1765                 if let Some(chan) = chan_option {
1766                         if let Ok(update) = self.get_channel_update(&chan) {
1767                                 let mut events = self.pending_events.lock().unwrap();
1768                                 events.push(events::Event::BroadcastChannelUpdate {
1769                                         msg: update
1770                                 });
1771                         }
1772                 }
1773                 Ok(res.0)
1774         }
1775
1776         fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
1777                 //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
1778                 //determine the state of the payment based on our response/if we forward anything/the time
1779                 //we take to respond. We should take care to avoid allowing such an attack.
1780                 //
1781                 //TODO: There exists a further attack where a node may garble the onion data, forward it to
1782                 //us repeatedly garbled in different ways, and compare our error messages, which are
1783                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
1784                 //but we should prevent it anyway.
1785
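                     // decode_update_add_htlc_onion peels our layer of the onion and tells us what to do with
                     // the HTLC, handing back the channel_state lock still held so the decision can be applied
                     // to the channel without racing other message handlers.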
1786                 let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
1787                 let channel_state = channel_state_lock.borrow_parts();
1788
1789                 match channel_state.by_id.get_mut(&msg.channel_id) {
1790                         Some(chan) => {
1791                                 if chan.get_their_node_id() != *their_node_id {
1792                                         //TODO: here MsgHandleErrInternal, #153 case
1793                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1794                                 }
1795                                 if !chan.is_usable() {
1796                                         return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Channel not yet available for receiving HTLCs", action: Some(msgs::ErrorAction::IgnoreError)}));
1797                                 }
1798                                 chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1799                         },
1800                         None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1801                 }
1802         }
1803
1804         fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
1805                 let mut channel_state = self.channel_state.lock().unwrap();
1806                 let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
1807                         Some(chan) => {
1808                                 if chan.get_their_node_id() != *their_node_id {
1809                                         //TODO: here and below MsgHandleErrInternal, #153 case
1810                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1811                                 }
1812                                 chan.update_fulfill_htlc(&msg)
1813                                         .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone()
1814                         },
1815                         None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1816                 };
1817                 self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone());
1818                 Ok(())
1819         }
1820
1821         // Process a failure we got back from upstream on a payment we sent. Returns the channel/node
1822         // update to hand back to the router, if any, and a boolean indicating whether the payment may be retried
1823         fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool) {
1824                 if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
1825                         macro_rules! onion_failure_log {
1826                                 ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => {
1827                                         log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value);
1828                                 };
1829                                 ( $error_code_textual: expr, $error_code: expr ) => {
1830                                         log_trace!(self, "{}({})", $error_code_textual, $error_code);
1831                                 };
1832                         }
1833
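                             // BOLT 4 failure codes carry flag bits in their top byte; the ones we match on
                             // directly are below. BOLT 4 also defines NODE (0x2000), which only appears in
                             // the comments that follow.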
1834                         const BADONION: u16 = 0x8000;
1835                         const PERM: u16 = 0x4000;
1836                         const UPDATE: u16 = 0x1000;
1837
1838                         let mut res = None;
1839                         let mut htlc_msat = *first_hop_htlc_msat;
1840
1841                         // Handle packed channel/node updates for passing back to the route handler
1842                         Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
1843                                 if res.is_some() { return; }
1844
1845                                 let incoming_htlc_msat = htlc_msat;
1846                                 let amt_to_forward = htlc_msat - route_hop.fee_msat;
1847                                 htlc_msat = amt_to_forward;
1848
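                                     // Each hop on the return path (including the hop that generated the error)
                                     // encrypted the failure packet with a ChaCha20 stream keyed by its ammag key,
                                     // so peeling one layer per hop walks the packet back to the state in which
                                     // that hop would have emitted it. E.g. if the second hop of the route errored,
                                     // the HMAC check below fails after removing only the first hop's layer but
                                     // succeeds once the second hop's layer has been removed as well.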
1849                                 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
1850
1851                                 let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
1852                                 decryption_tmp.resize(packet_decrypted.len(), 0);
1853                                 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
1854                                 chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
1855                                 packet_decrypted = decryption_tmp;
1856
1857                                 let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
1858
1859                                 if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
1860                                         let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
1861                                         let mut hmac = Hmac::new(Sha256::new(), &um);
1862                                         hmac.input(&err_packet.encode()[32..]);
1863                                         let mut calc_tag = [0u8; 32];
1864                                         hmac.raw_result(&mut calc_tag);
1865
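                                             // The failure packet's HMAC is keyed with the um key derived from this
                                             // hop's shared secret, so a (constant-time) match proves this hop
                                             // generated the error rather than merely forwarding it.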
1866                                         if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
1867                                                 if err_packet.failuremsg.len() < 2 {
1868                                                         // The failure message is too short to even contain an error code, but it
1869                                                         // passed the HMAC check, so it definitely came from the node in question
1870                                                         res = Some((None, !is_from_final_node));
1871                                                 } else {
1872                                                         let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]);
1873
1874                                                         match error_code & 0xff {
1875                                                                 1|2|3 => {
1876                                                                         // either from an intermediate or final node
1877                                                                         //   invalid_realm(PERM|1),
1878                                                                         //   temporary_node_failure(NODE|2)
1879                                                                         //   permanent_node_failure(PERM|NODE|2)
1880                                                                         //   required_node_feature_missing(PERM|NODE|3)
1881                                                                         res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
1882                                                                                 node_id: route_hop.pubkey,
1883                                                                                 is_permanent: error_code & PERM == PERM,
1884                                                                         }), !(error_code & PERM == PERM && is_from_final_node)));
1885                                                                         // A node returning invalid_realm is removed from the network_map even
1886                                                                         // though the NODE flag is not set. TODO: or should we only remove the channel?
1887                                                                         // The payment is retried as long as the removed node was not the final node.
1888                                                                         return;
1889                                                                 },
1890                                                                 _ => {}
1891                                                         }
1892
1893                                                         if is_from_final_node {
1894                                                                 let payment_retryable = match error_code {
1895                                                                         c if c == PERM|15 => false, // unknown_payment_hash
1896                                                                         c if c == PERM|16 => false, // incorrect_payment_amount
1897                                                                         17 => true, // final_expiry_too_soon
1898                                                                         18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry
1899                                                                                 let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
1900                                                                                 true
1901                                                                         },
1902                                                                         19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount
1903                                                                                 let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
1904                                                                                 true
1905                                                                         },
1906                                                                         _ => {
1907                                                                                 // The final node has sent us either an invalid code, an error_code that
1908                                                                                 // only an intermediate (processing) node should send, or a failuremsg whose
1909                                                                                 // format does not conform to the spec.
1910                                                                                 // Remove it from the network map and don't retry the payment.
1911                                                                                 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
1912                                                                                         node_id: route_hop.pubkey,
1913                                                                                         is_permanent: true,
1914                                                                                 }), false));
1915                                                                                 return;
1916                                                                         }
1917                                                                 };
1918                                                                 res = Some((None, payment_retryable));
1919                                                                 return;
1920                                                         }
1921
1922                                                         // At this point the error_code can only have come from an intermediate node
1923                                                         match error_code {
1924                                                                 _c if error_code & PERM == PERM => {
1925                                                                         res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
1926                                                                                 short_channel_id: route_hop.short_channel_id,
1927                                                                                 is_permanent: true,
1928                                                                         }), false));
1929                                                                 },
1930                                                                 _c if error_code & UPDATE == UPDATE => {
1931                                                                         let offset = match error_code {
1932                                                                                 c if c == UPDATE|7  => 0, // temporary_channel_failure
1933                                                                                 c if c == UPDATE|11 => 8, // amount_below_minimum
1934                                                                                 c if c == UPDATE|12 => 8, // fee_insufficient
1935                                                                                 c if c == UPDATE|13 => 4, // incorrect_cltv_expiry
1936                                                                                 c if c == UPDATE|14 => 0, // expiry_too_soon
1937                                                                                 c if c == UPDATE|20 => 2, // channel_disabled
1938                                                                                 _ =>  {
1939                                                                                         // node sending unknown code
1940                                                                                         res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
1941                                                                                                 node_id: route_hop.pubkey,
1942                                                                                                 is_permanent: true,
1943                                                                                         }), false));
1944                                                                                         return;
1945                                                                                 }
1946                                                                         };
1947
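                                                                             // offset is the length of the code-specific fields sitting between the
                                                                             // 2-byte failure code and the 2-byte channel_update length; e.g. for
                                                                             // amount_below_minimum an 8-byte htlc_msat comes first, so the update
                                                                             // length lives at bytes 10..12 of failuremsg.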
1948                                                                         if err_packet.failuremsg.len() >= offset + 4 {
1949                                                                                 let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize;
1950                                                                                 if err_packet.failuremsg.len() >= offset + 4 + update_len {
1951                                                                                         if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) {
1952                                                                                                 // Per BOLT 4: if the channel_update should NOT have caused the failure,
1953                                                                                                 // we MAY treat the channel_update as invalid.
1954                                                                                                 let is_chan_update_invalid = match error_code {
1955                                                                                                         c if c == UPDATE|7 => { // temporary_channel_failure
1956                                                                                                                 false
1957                                                                                                         },
1958                                                                                                         c if c == UPDATE|11 => { // amount_below_minimum
1959                                                                                                                 let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
1960                                                                                                                 onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat);
1961                                                                                                                 incoming_htlc_msat > chan_update.contents.htlc_minimum_msat
1962                                                                                                         },
1963                                                                                                         c if c == UPDATE|12 => { // fee_insufficient
1964                                                                                                                 let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
1965                                                                                                                 let new_fee =  amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
1966                                                                                                                 onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat);
1967                                                                                                                 new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap()
1968                                                                                                         }
1969                                                                                                         c if c == UPDATE|13 => { // incorrect_cltv_expiry
1970                                                                                                                 let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
1971                                                                                                                 onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry);
1972                                                                                                                 route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta
1973                                                                                                         },
1974                                                                                                         c if c == UPDATE|20 => { // channel_disabled
1975                                                                                                                 let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]);
1976                                                                                                                 onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags);
1977                                                                                                                 chan_update.contents.flags & 0x01 == 0x01
1978                                                                                                         },
1979                                                                                                         c if c == UPDATE|21 => true, // expiry_too_far
1980                                                                                                         _ => { unreachable!(); },
1981                                                                                                 };
1982
1983                                                                                                 let msg = if is_chan_update_invalid { None } else {
1984                                                                                                         Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
1985                                                                                                                 msg: chan_update,
1986                                                                                                         })
1987                                                                                                 };
1988                                                                                                 res = Some((msg, true));
1989                                                                                                 return;
1990                                                                                         }
1991                                                                                 }
1992                                                                         }
1993                                                                 },
1994                                                                 _c if error_code & BADONION == BADONION => {
1995                                                                         //TODO
1996                                                                 },
1997                                                                 14 => { // expiry_too_soon
1998                                                                         res = Some((None, true));
1999                                                                         return;
2000                                                                 }
2001                                                                 _ => {
2002                                                                         // node sending unknown code
2003                                                                         res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2004                                                                                 node_id: route_hop.pubkey,
2005                                                                                 is_permanent: true,
2006                                                                         }), false));
2007                                                                         return;
2008                                                                 }
2009                                                         }
2010                                                 }
2011                                         }
2012                                 }
2013                         }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
2014                         res.unwrap_or((None, true))
2015                 } else { (None, true) }
2016         }
2017
2018         fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
2019                 let mut channel_state = self.channel_state.lock().unwrap();
2020                 match channel_state.by_id.get_mut(&msg.channel_id) {
2021                         Some(chan) => {
2022                                 if chan.get_their_node_id() != *their_node_id {
2023                                         //TODO: here and below MsgHandleErrInternal, #153 case
2024                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2025                                 }
2026                                 chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
2027                                         .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
2028                         },
2029                         None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2030                 }?;
2031                 Ok(())
2032         }
2033
2034         fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
2035                 let mut channel_state = self.channel_state.lock().unwrap();
2036                 match channel_state.by_id.get_mut(&msg.channel_id) {
2037                         Some(chan) => {
2038                                 if chan.get_their_node_id() != *their_node_id {
2039                                         //TODO: here and below MsgHandleErrInternal, #153 case
2040                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2041                                 }
2042                                 if (msg.failure_code & 0x8000) == 0 {
2043                                         return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with the BADONION bit unset", msg.channel_id));
2044                                 }
2045                                 chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
2046                                         .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2047                                 Ok(())
2048                         },
2049                         None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2050                 }
2051         }
2052
2053         fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), MsgHandleErrInternal> {
2054                 let (revoke_and_ack, commitment_signed, chan_monitor) = {
2055                         let mut channel_state = self.channel_state.lock().unwrap();
2056                         match channel_state.by_id.get_mut(&msg.channel_id) {
2057                                 Some(chan) => {
2058                                         if chan.get_their_node_id() != *their_node_id {
2059                                                 //TODO: here and below MsgHandleErrInternal, #153 case
2060                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2061                                         }
2062                                         chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
2063                                 },
2064                                 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2065                         }
2066                 };
2067                 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2068                         unimplemented!();
2069                 }
2070
2071                 Ok((revoke_and_ack, commitment_signed))
2072         }
2073
2074         fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, MsgHandleErrInternal> {
2075                 let ((res, mut pending_forwards, mut pending_failures, chan_monitor), short_channel_id) = {
2076                         let mut channel_state = self.channel_state.lock().unwrap();
2077                         match channel_state.by_id.get_mut(&msg.channel_id) {
2078                                 Some(chan) => {
2079                                         if chan.get_their_node_id() != *their_node_id {
2080                                                 //TODO: here and below MsgHandleErrInternal, #153 case
2081                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2082                                         }
2083                                         (chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
2084                                 },
2085                                 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2086                         }
2087                 };
2088                 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2089                         unimplemented!();
2090                 }
2091                 for failure in pending_failures.drain(..) {
2092                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
2093                 }
2094
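                     // Forwards are batched: the first HTLC to land in an empty forward_htlcs map schedules a
                     // PendingHTLCsForwardable event with a randomized delay of roughly one to five times
                     // MIN_HTLC_RELAY_HOLDING_CELL_MILLIS; HTLCs arriving later simply join the pending batch.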
2095                 let mut forward_event = None;
2096                 if !pending_forwards.is_empty() {
2097                         let mut channel_state = self.channel_state.lock().unwrap();
2098                         if channel_state.forward_htlcs.is_empty() {
2099                                 forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
2100                                 channel_state.next_forward = forward_event.unwrap();
2101                         }
2102                         for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
2103                                 match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
2104                                         hash_map::Entry::Occupied(mut entry) => {
2105                                                 entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info });
2106                                         },
2107                                         hash_map::Entry::Vacant(entry) => {
2108                                                 entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info }));
2109                                         }
2110                                 }
2111                         }
2112                 }
2113                 match forward_event {
2114                         Some(time) => {
2115                                 let mut pending_events = self.pending_events.lock().unwrap();
2116                                 pending_events.push(events::Event::PendingHTLCsForwardable {
2117                                         time_forwardable: time
2118                                 });
2119                         }
2120                         None => {},
2121                 }
2122
2123                 Ok(res)
2124         }
2125
2126         fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
2127                 let mut channel_state = self.channel_state.lock().unwrap();
2128                 match channel_state.by_id.get_mut(&msg.channel_id) {
2129                         Some(chan) => {
2130                                 if chan.get_their_node_id() != *their_node_id {
2131                                         //TODO: here and below MsgHandleErrInternal, #153 case
2132                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2133                                 }
2134                                 chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
2135                         },
2136                         None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2137                 }
2138         }
2139
2140         fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
2141                 let (chan_announcement, chan_update) = {
2142                         let mut channel_state = self.channel_state.lock().unwrap();
2143                         match channel_state.by_id.get_mut(&msg.channel_id) {
2144                                 Some(chan) => {
2145                                         if chan.get_their_node_id() != *their_node_id {
2146                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2147                                         }
2148                                         if !chan.is_usable() {
2149                                                 return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
2150                                         }
2151
2152                                         let our_node_id = self.get_our_node_id();
2153                                         let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
2154                                                 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2155
2156                                         let were_node_one = announcement.node_id_1 == our_node_id;
2157                                         let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
2158                                         let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id);
2159                                         secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action);
2160                                         secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action);
2161
2162                                         let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
2163
2164                                         (msgs::ChannelAnnouncement {
2165                                                 node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
2166                                                 node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
2167                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
2168                                                 bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
2169                                                 contents: announcement,
2170                                         }, self.get_channel_update(chan).unwrap()) // can only fail if we're not in a ready state
2171                                 },
2172                                 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2173                         }
2174                 };
2175                 let mut pending_events = self.pending_events.lock().unwrap();
2176                 pending_events.push(events::Event::BroadcastChannelAnnouncement { msg: chan_announcement, update_msg: chan_update });
2177                 Ok(())
2178         }
2179
2180         fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), MsgHandleErrInternal> {
2181                 let (res, chan_monitor) = {
2182                         let mut channel_state = self.channel_state.lock().unwrap();
2183                         match channel_state.by_id.get_mut(&msg.channel_id) {
2184                                 Some(chan) => {
2185                                         if chan.get_their_node_id() != *their_node_id {
2186                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2187                                         }
2188                                         let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg)
2189                                                 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2190                                         (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor)
2191                                 },
2192                                 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2193                         }
2194                 };
2195                 if let Some(monitor) = chan_monitor {
2196                         if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
2197                                 unimplemented!();
2198                         }
2199                 }
2200                 res
2201         }
2202
2203         /// Begin the process of updating the channel feerate. Allowed only on an outbound channel.
2204         /// If successful, this will generate an UpdateHTLCs event, so you should probably poll
2205         /// PeerManager::process_events afterwards.
2206         /// Note: This API is likely to change!
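             /// A minimal usage sketch; the `channel_manager`, `peer_manager` and `chan_id` bindings here are
             /// illustrative only:
             /// ```ignore
             /// // Propose a new feerate on the channel, then let the peer handler flush the resulting
             /// // update_fee/commitment_signed messages out to the remote peer.
             /// channel_manager.update_fee(chan_id, 1000)?;
             /// peer_manager.process_events();
             /// ```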
2207         #[doc(hidden)]
2208         pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
2209                 let mut channel_state = self.channel_state.lock().unwrap();
2210                 match channel_state.by_id.get_mut(&channel_id) {
2211                         None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
2212                         Some(chan) => {
2213                                 if !chan.is_outbound() {
2214                                         return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
2215                                 }
2216                                 if !chan.is_live() {
2217                                         return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
2218                                 }
2219                                 if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? {
2220                                         if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2221                                                 unimplemented!();
2222                                         }
2223                                         let mut pending_events = self.pending_events.lock().unwrap();
2224                                         pending_events.push(events::Event::UpdateHTLCs {
2225                                                 node_id: chan.get_their_node_id(),
2226                                                 updates: msgs::CommitmentUpdate {
2227                                                         update_add_htlcs: Vec::new(),
2228                                                         update_fulfill_htlcs: Vec::new(),
2229                                                         update_fail_htlcs: Vec::new(),
2230                                                         update_fail_malformed_htlcs: Vec::new(),
2231                                                         update_fee: Some(update_fee),
2232                                                         commitment_signed,
2233                                                 },
2234                                         });
2235                                 }
2236                         },
2237                 }
2238                 Ok(())
2239         }
2240 }
2241
2242 impl events::EventsProvider for ChannelManager {
2243         fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
2244                 let mut pending_events = self.pending_events.lock().unwrap();
2245                 let mut ret = Vec::new();
2246                 mem::swap(&mut ret, &mut *pending_events);
2247                 ret
2248         }
2249 }
2250
2251 impl ChainListener for ChannelManager {
2252         fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
2253                 let mut new_events = Vec::new();
2254                 let mut failed_channels = Vec::new();
2255                 {
2256                         let mut channel_lock = self.channel_state.lock().unwrap();
2257                         let channel_state = channel_lock.borrow_parts();
2258                         let short_to_id = channel_state.short_to_id;
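                             // Walk every channel: surface funding_locked once the funding transaction confirms,
                             // drop channels whose funding output was spent on-chain (broadcasting our latest
                             // state via force_shutdown), and force-close channels whose monitor would already
                             // broadcast at this height.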
2259                         channel_state.by_id.retain(|_, channel| {
2260                                 let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
2261                                 if let Ok(Some(funding_locked)) = chan_res {
2262                                         let announcement_sigs = self.get_announcement_sigs(channel);
2263                                         new_events.push(events::Event::SendFundingLocked {
2264                                                 node_id: channel.get_their_node_id(),
2265                                                 msg: funding_locked,
2266                                                 announcement_sigs: announcement_sigs
2267                                         });
2268                                         short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
2269                                 } else if let Err(e) = chan_res {
2270                                         new_events.push(events::Event::HandleError {
2271                                                 node_id: channel.get_their_node_id(),
2272                                                 action: e.action,
2273                                         });
2274                                         if channel.is_shutdown() {
2275                                                 return false;
2276                                         }
2277                                 }
2278                                 if let Some(funding_txo) = channel.get_funding_txo() {
2279                                         for tx in txn_matched {
2280                                                 for inp in tx.input.iter() {
2281                                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
2282                                                                 if let Some(short_id) = channel.get_short_channel_id() {
2283                                                                         short_to_id.remove(&short_id);
2284                                                                 }
2285                                                                 // It looks like our counterparty went on-chain. We go ahead and
2286                                                                 // broadcast our latest local state as well here, just in case it's
2287                                                                 // some kind of SPV attack, though we expect these to be dropped.
2288                                                                 failed_channels.push(channel.force_shutdown());
2289                                                                 if let Ok(update) = self.get_channel_update(&channel) {
2290                                                                         new_events.push(events::Event::BroadcastChannelUpdate {
2291                                                                                 msg: update
2292                                                                         });
2293                                                                 }
2294                                                                 return false;
2295                                                         }
2296                                                 }
2297                                         }
2298                                 }
2299                                 if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
2300                                         if let Some(short_id) = channel.get_short_channel_id() {
2301                                                 short_to_id.remove(&short_id);
2302                                         }
2303                                         failed_channels.push(channel.force_shutdown());
2304                                         // If would_broadcast_at_height() is true, the channel_monitor will broadcast
2305                                         // the latest local tx for us, so we should skip that here (it doesn't really
2306                                         // hurt anything, but does make tests a bit simpler).
2307                                         failed_channels.last_mut().unwrap().0 = Vec::new();
2308                                         if let Ok(update) = self.get_channel_update(&channel) {
2309                                                 new_events.push(events::Event::BroadcastChannelUpdate {
2310                                                         msg: update
2311                                                 });
2312                                         }
2313                                         return false;
2314                                 }
2315                                 true
2316                         });
2317                 }
2318                 for failure in failed_channels.drain(..) {
2319                         self.finish_force_close_channel(failure);
2320                 }
2321                 let mut pending_events = self.pending_events.lock().unwrap();
2322                 for funding_locked in new_events.drain(..) {
2323                         pending_events.push(funding_locked);
2324                 }
2325                 self.latest_block_height.store(height as usize, Ordering::Release);
2326         }
2327
2328         /// We force-close the channel without letting our counterparty participate in the shutdown
2329         fn block_disconnected(&self, header: &BlockHeader) {
2330                 let mut new_events = Vec::new();
2331                 let mut failed_channels = Vec::new();
2332                 {
2333                         let mut channel_lock = self.channel_state.lock().unwrap();
2334                         let channel_state = channel_lock.borrow_parts();
2335                         let short_to_id = channel_state.short_to_id;
2336                         channel_state.by_id.retain(|_,  v| {
2337                                 if v.block_disconnected(header) {
2338                                         if let Some(short_id) = v.get_short_channel_id() {
2339                                                 short_to_id.remove(&short_id);
2340                                         }
2341                                         failed_channels.push(v.force_shutdown());
2342                                         if let Ok(update) = self.get_channel_update(&v) {
2343                                                 new_events.push(events::Event::BroadcastChannelUpdate {
2344                                                         msg: update
2345                                                 });
2346                                         }
2347                                         false
2348                                 } else {
2349                                         true
2350                                 }
2351                         });
2352                 }
2353                 for failure in failed_channels.drain(..) {
2354                         self.finish_force_close_channel(failure);
2355                 }
2356                 if !new_events.is_empty() {
2357                         let mut pending_events = self.pending_events.lock().unwrap();
2358                         for funding_locked in new_events.drain(..) {
2359                                 pending_events.push(funding_locked);
2360                         }
2361                 }
2362                 self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
2363         }
2364 }
2365
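 // Maps a MsgHandleErrInternal into the HandleError we hand back to the peer handler, first
 // force-closing the offending channel (or treating the peer as disconnected when the error carries
 // the all-zero channel_id) whenever the internal error requests a force-close.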
2366 macro_rules! handle_error {
2367         ($self: ident, $internal: expr, $their_node_id: expr) => {
2368                 match $internal {
2369                         Ok(msg) => Ok(msg),
2370                         Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
2371                                 if needs_channel_force_close {
2372                                         match &err.action {
2373                                                 &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
2374                                                         if msg.channel_id == [0; 32] {
2375                                                                 $self.peer_disconnected(&$their_node_id, true);
2376                                                         } else {
2377                                                                 $self.force_close_channel(&msg.channel_id);
2378                                                         }
2379                                                 },
2380                                                 &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
2381                                                 &Some(msgs::ErrorAction::IgnoreError) => {},
2382                                                 &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
2383                                                         if msg.channel_id == [0; 32] {
2384                                                                 $self.peer_disconnected(&$their_node_id, true);
2385                                                         } else {
2386                                                                 $self.force_close_channel(&msg.channel_id);
2387                                                         }
2388                                                 },
2389                                                 &None => {},
2390                                         }
2391                                 }
2392                                 Err(err)
2393                         },
2394                 }
2395         }
2396 }
2397
2398 impl ChannelMessageHandler for ChannelManager {
2399         //TODO: Handle errors and close channel (or so)
2400         fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, HandleError> {
2401                 handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
2402         }
2403
2404         fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
2405                 handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
2406         }
2407
2408         fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, HandleError> {
2409                 handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
2410         }
2411
2412         fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
2413                 handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
2414         }
2415
2416         fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, HandleError> {
2417                 handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
2418         }
2419
2420         fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), HandleError> {
2421                 handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
2422         }
2423
2424         fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, HandleError> {
2425                 handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
2426         }
2427
2428         fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
2429                 handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
2430         }
2431
2432         fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
2433                 handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
2434         }
2435
2436         fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
2437                 handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
2438         }
2439
2440         fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
2441                 handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
2442         }
2443
2444         fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
2445                 handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
2446         }
2447
2448         fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
2449                 handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
2450         }
2451
2452         fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
2453                 handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
2454         }
2455
2456         fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
2457                 handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
2458         }
2459
2460         fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), HandleError> {
2461                 handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
2462         }
2463
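        // When a peer disconnects, either force-close all of its channels (if no reconnection is
        // possible) or drop any uncommitted HTLC adds and pause the channels, failing the dropped
        // HTLCs backwards with temporary_channel_failure (0x1000 | 7) carrying our channel_update.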
2464         fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
2465                 let mut new_events = Vec::new();
2466                 let mut failed_channels = Vec::new();
2467                 let mut failed_payments = Vec::new();
2468                 {
2469                         let mut channel_state_lock = self.channel_state.lock().unwrap();
2470                         let channel_state = channel_state_lock.borrow_parts();
2471                         let short_to_id = channel_state.short_to_id;
2472                         if no_connection_possible {
2473                                 channel_state.by_id.retain(|_, chan| {
2474                                         if chan.get_their_node_id() == *their_node_id {
2475                                                 if let Some(short_id) = chan.get_short_channel_id() {
2476                                                         short_to_id.remove(&short_id);
2477                                                 }
2478                                                 failed_channels.push(chan.force_shutdown());
2479                                                 if let Ok(update) = self.get_channel_update(&chan) {
2480                                                         new_events.push(events::Event::BroadcastChannelUpdate {
2481                                                                 msg: update
2482                                                         });
2483                                                 }
2484                                                 false
2485                                         } else {
2486                                                 true
2487                                         }
2488                                 });
2489                         } else {
2490                                 channel_state.by_id.retain(|_, chan| {
2491                                         if chan.get_their_node_id() == *their_node_id {
2492                                                 //TODO: mark channel disabled (and maybe announce such after a timeout).
2493                                                 let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused();
2494                                                 if !failed_adds.is_empty() {
2495                                                         let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id, so unwrap is safe
2496                                                         failed_payments.push((chan_update, failed_adds));
2497                                                 }
2498                                                 if chan.is_shutdown() {
2499                                                         if let Some(short_id) = chan.get_short_channel_id() {
2500                                                                 short_to_id.remove(&short_id);
2501                                                         }
2502                                                         return false;
2503                                                 }
2504                                         }
2505                                         true
2506                                 })
2507                         }
2508                 }
2509                 for failure in failed_channels.drain(..) {
2510                         self.finish_force_close_channel(failure);
2511                 }
2512                 if !new_events.is_empty() {
2513                         let mut pending_events = self.pending_events.lock().unwrap();
2514                         for event in new_events.drain(..) {
2515                                 pending_events.push(event);
2516                         }
2517                 }
2518                 for (chan_update, mut htlc_sources) in failed_payments {
2519                         for (htlc_source, payment_hash) in htlc_sources.drain(..) {
2520                                 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
2521                         }
2522                 }
2523         }
2524
2525         fn peer_connected(&self, their_node_id: &PublicKey) -> Vec<msgs::ChannelReestablish> {
2526                 let mut res = Vec::new();
2527                 let mut channel_state = self.channel_state.lock().unwrap();
2528                 channel_state.by_id.retain(|_, chan| {
2529                         if chan.get_their_node_id() == *their_node_id {
2530                                 if !chan.have_received_message() {
2531                                         // If we created this (outbound) channel while we were disconnected from the
2532                                         // peer, we probably failed to send the open_channel message, which is now
2533                                         // lost. We can't have had anything pending related to this channel, so we just
2534                                         // drop it.
2535                                         false
2536                                 } else {
2537                                         res.push(chan.get_channel_reestablish());
2538                                         true
2539                                 }
2540                         } else { true }
2541                 });
2542                 //TODO: Also re-broadcast announcement_signatures
2543                 res
2544         }
2545
2546         fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
2547                 if msg.channel_id == [0; 32] {
2548                         for chan in self.list_channels() {
2549                                 if chan.remote_network_id == *their_node_id {
2550                                         self.force_close_channel(&chan.channel_id);
2551                                 }
2552                         }
2553                 } else {
2554                         self.force_close_channel(&msg.channel_id);
2555                 }
2556         }
2557 }
2558
2559 #[cfg(test)]
2560 mod tests {
2561         use chain::chaininterface;
2562         use chain::transaction::OutPoint;
2563         use chain::chaininterface::ChainListener;
2564         use ln::channelmanager::{ChannelManager,OnionKeys,HTLCSource};
2565         use ln::channelmonitor::{CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
2566         use ln::router::{Route, RouteHop, Router};
2567         use ln::msgs;
2568         use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate};
2569         use util::{test_utils, rng};
2570         use util::events::{Event, EventsProvider};
2571         use util::errors::APIError;
2572         use util::logger::Logger;
2573         use util::ser::Writeable;
2574
2575         use bitcoin::util::hash::Sha256dHash;
2576         use bitcoin::blockdata::block::{Block, BlockHeader};
2577         use bitcoin::blockdata::transaction::{Transaction, TxOut};
2578         use bitcoin::blockdata::constants::genesis_block;
2579         use bitcoin::network::constants::Network;
2580         use bitcoin::network::serialize::serialize;
2581         use bitcoin::network::serialize::BitcoinHash;
2582
2583         use hex;
2584
2585         use secp256k1::{Secp256k1, Message};
2586         use secp256k1::key::{PublicKey,SecretKey};
2587
2588         use crypto::sha2::Sha256;
2589         use crypto::digest::Digest;
2590
2591         use rand::{thread_rng,Rng};
2592
2593         use std::cell::RefCell;
2594         use std::collections::{BTreeSet, HashMap};
2595         use std::default::Default;
2596         use std::rc::Rc;
2597         use std::sync::{Arc, Mutex};
2598         use std::sync::atomic::Ordering;
2599         use std::time::Instant;
2600         use std::mem;
2601
2602         fn build_test_onion_keys() -> Vec<OnionKeys> {
2603                 // Keys from BOLT 4, used in both of the test-vector tests below
2604                 let secp_ctx = Secp256k1::new();
2605
2606                 let route = Route {
2607                         hops: vec!(
2608                                         RouteHop {
2609                                                 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
2610                                                 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
2611                                         },
2612                                         RouteHop {
2613                                                 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
2614                                                 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
2615                                         },
2616                                         RouteHop {
2617                                                 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
2618                                                 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
2619                                         },
2620                                         RouteHop {
2621                                                 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]).unwrap(),
2622                                                 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
2623                                         },
2624                                         RouteHop {
2625                                                 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()[..]).unwrap(),
2626                                                 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
2627                                         },
2628                         ),
2629                 };
2630
2631                 let session_priv = SecretKey::from_slice(&secp_ctx, &hex::decode("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap();
2632
2633                 let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
2634                 assert_eq!(onion_keys.len(), route.hops.len());
2635                 onion_keys
2636         }
2637
2638         #[test]
2639         fn onion_vectors() {
2640                 // Packet creation test vectors from BOLT 4
2641                 let onion_keys = build_test_onion_keys();
2642
2643                 assert_eq!(onion_keys[0].shared_secret[..], hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
2644                 assert_eq!(onion_keys[0].blinding_factor[..], hex::decode("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
2645                 assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
2646                 assert_eq!(onion_keys[0].rho, hex::decode("ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986").unwrap()[..]);
2647                 assert_eq!(onion_keys[0].mu, hex::decode("b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba").unwrap()[..]);
2648
2649                 assert_eq!(onion_keys[1].shared_secret[..], hex::decode("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
2650                 assert_eq!(onion_keys[1].blinding_factor[..], hex::decode("bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f").unwrap()[..]);
2651                 assert_eq!(onion_keys[1].ephemeral_pubkey.serialize()[..], hex::decode("028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2").unwrap()[..]);
2652                 assert_eq!(onion_keys[1].rho, hex::decode("450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59").unwrap()[..]);
2653                 assert_eq!(onion_keys[1].mu, hex::decode("05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9").unwrap()[..]);
2654
2655                 assert_eq!(onion_keys[2].shared_secret[..], hex::decode("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
2656                 assert_eq!(onion_keys[2].blinding_factor[..], hex::decode("a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5").unwrap()[..]);
2657                 assert_eq!(onion_keys[2].ephemeral_pubkey.serialize()[..], hex::decode("03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0").unwrap()[..]);
2658                 assert_eq!(onion_keys[2].rho, hex::decode("11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea").unwrap()[..]);
2659                 assert_eq!(onion_keys[2].mu, hex::decode("caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78").unwrap()[..]);
2660
2661                 assert_eq!(onion_keys[3].shared_secret[..], hex::decode("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
2662                 assert_eq!(onion_keys[3].blinding_factor[..], hex::decode("7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262").unwrap()[..]);
2663                 assert_eq!(onion_keys[3].ephemeral_pubkey.serialize()[..], hex::decode("031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595").unwrap()[..]);
2664                 assert_eq!(onion_keys[3].rho, hex::decode("cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e").unwrap()[..]);
2665                 assert_eq!(onion_keys[3].mu, hex::decode("5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9").unwrap()[..]);
2666
2667                 assert_eq!(onion_keys[4].shared_secret[..], hex::decode("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
2668                 assert_eq!(onion_keys[4].blinding_factor[..], hex::decode("c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205").unwrap()[..]);
2669                 assert_eq!(onion_keys[4].ephemeral_pubkey.serialize()[..], hex::decode("03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4").unwrap()[..]);
2670                 assert_eq!(onion_keys[4].rho, hex::decode("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
2671                 assert_eq!(onion_keys[4].mu, hex::decode("8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a").unwrap()[..]);
2672
2673                 // Test vectors below are flat-out wrong: they claim to set outgoing_cltv_value to a non-zero value :/
2674                 let payloads = vec!(
2675                         msgs::OnionHopData {
2676                                 realm: 0,
2677                                 data: msgs::OnionRealm0HopData {
2678                                         short_channel_id: 0,
2679                                         amt_to_forward: 0,
2680                                         outgoing_cltv_value: 0,
2681                                 },
2682                                 hmac: [0; 32],
2683                         },
2684                         msgs::OnionHopData {
2685                                 realm: 0,
2686                                 data: msgs::OnionRealm0HopData {
2687                                         short_channel_id: 0x0101010101010101,
2688                                         amt_to_forward: 0x0100000001,
2689                                         outgoing_cltv_value: 0,
2690                                 },
2691                                 hmac: [0; 32],
2692                         },
2693                         msgs::OnionHopData {
2694                                 realm: 0,
2695                                 data: msgs::OnionRealm0HopData {
2696                                         short_channel_id: 0x0202020202020202,
2697                                         amt_to_forward: 0x0200000002,
2698                                         outgoing_cltv_value: 0,
2699                                 },
2700                                 hmac: [0; 32],
2701                         },
2702                         msgs::OnionHopData {
2703                                 realm: 0,
2704                                 data: msgs::OnionRealm0HopData {
2705                                         short_channel_id: 0x0303030303030303,
2706                                         amt_to_forward: 0x0300000003,
2707                                         outgoing_cltv_value: 0,
2708                                 },
2709                                 hmac: [0; 32],
2710                         },
2711                         msgs::OnionHopData {
2712                                 realm: 0,
2713                                 data: msgs::OnionRealm0HopData {
2714                                         short_channel_id: 0x0404040404040404,
2715                                         amt_to_forward: 0x0400000004,
2716                                         outgoing_cltv_value: 0,
2717                                 },
2718                                 hmac: [0; 32],
2719                         },
2720                 );
2721
2722                 let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]);
2723                 // Just check the final packet encoding, as it includes all the per-hop vectors in it
2724                 // anyway...
2725                 assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
2726         }
2727
2728         #[test]
2729         fn test_failure_packet_onion() {
2730                 // Returning Errors test vectors from BOLT 4
2731
2732                 let onion_keys = build_test_onion_keys();
2733                 let onion_error = ChannelManager::build_failure_packet(&onion_keys[4].shared_secret, 0x2002, &[0; 0]);
2734                 assert_eq!(onion_error.encode(), hex::decode("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap());
2735
2736                 let onion_packet_1 = ChannelManager::encrypt_failure_packet(&onion_keys[4].shared_secret, &onion_error.encode()[..]);
2737                 assert_eq!(onion_packet_1.data, hex::decode("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap());
2738
2739                 let onion_packet_2 = ChannelManager::encrypt_failure_packet(&onion_keys[3].shared_secret, &onion_packet_1.data[..]);
2740                 assert_eq!(onion_packet_2.data, hex::decode("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap());
2741
2742                 let onion_packet_3 = ChannelManager::encrypt_failure_packet(&onion_keys[2].shared_secret, &onion_packet_2.data[..]);
2743                 assert_eq!(onion_packet_3.data, hex::decode("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap());
2744
2745                 let onion_packet_4 = ChannelManager::encrypt_failure_packet(&onion_keys[1].shared_secret, &onion_packet_3.data[..]);
2746                 assert_eq!(onion_packet_4.data, hex::decode("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap());
2747
2748                 let onion_packet_5 = ChannelManager::encrypt_failure_packet(&onion_keys[0].shared_secret, &onion_packet_4.data[..]);
2749                 assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
2750         }
2751
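        // Mines a block containing `tx` (tagged with `chan_id`) and then ~98 empty blocks so the
        // funding transaction is considered deeply confirmed by the ChainWatchInterface.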
2752         fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
2753                 assert!(chain.does_match_tx(tx));
2754                 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
2755                 chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
2756                 for i in 2..100 {
2757                         header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
2758                         chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
2759                 }
2760         }
2761
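        // Everything a test node needs: its chain watcher, transaction broadcaster, channel
        // monitor, ChannelManager and Router, plus counters shared across the test network for
        // generating unique payment preimages and channel ids.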
2762         struct Node {
2763                 chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
2764                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
2765                 chan_monitor: Arc<test_utils::TestChannelMonitor>,
2766                 node: Arc<ChannelManager>,
2767                 router: Router,
2768                 network_payment_count: Rc<RefCell<u8>>,
2769                 network_chan_count: Rc<RefCell<u32>>,
2770         }
2771         impl Drop for Node {
2772                 fn drop(&mut self) {
2773                         if !::std::thread::panicking() {
2774                                 // Check that we processed all pending events
2775                                 assert_eq!(self.node.get_and_clear_pending_events().len(), 0);
2776                                 assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0);
2777                         }
2778                 }
2779         }
2780
2781         fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
2782                 create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
2783         }
2784
2785         fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
2786                 let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
2787                 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
2788                 (announcement, as_update, bs_update, channel_id, tx)
2789         }
2790
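        // Runs the open_channel/accept_channel/funding_created/funding_signed handshake between
        // node_a and node_b and returns the (not yet confirmed) funding transaction.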
2791         fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction {
2792                 node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
2793
2794                 let events_1 = node_a.node.get_and_clear_pending_events();
2795                 assert_eq!(events_1.len(), 1);
2796                 let accept_chan = match events_1[0] {
2797                         Event::SendOpenChannel { ref node_id, ref msg } => {
2798                                 assert_eq!(*node_id, node_b.node.get_our_node_id());
2799                                 node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), msg).unwrap()
2800                         },
2801                         _ => panic!("Unexpected event"),
2802                 };
2803
2804                 node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_chan).unwrap();
2805
2806                 let chan_id = *node_a.network_chan_count.borrow();
2807                 let tx;
2808                 let funding_output;
2809
2810                 let events_2 = node_a.node.get_and_clear_pending_events();
2811                 assert_eq!(events_2.len(), 1);
2812                 match events_2[0] {
2813                         Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
2814                                 assert_eq!(*channel_value_satoshis, channel_value);
2815                                 assert_eq!(user_channel_id, 42);
2816
2817                                 tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
2818                                         value: *channel_value_satoshis, script_pubkey: output_script.clone(),
2819                                 }]};
2820                                 funding_output = OutPoint::new(Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0);
2821
2822                                 node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
2823                                 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
2824                                 assert_eq!(added_monitors.len(), 1);
2825                                 assert_eq!(added_monitors[0].0, funding_output);
2826                                 added_monitors.clear();
2827                         },
2828                         _ => panic!("Unexpected event"),
2829                 }
2830
2831                 let events_3 = node_a.node.get_and_clear_pending_events();
2832                 assert_eq!(events_3.len(), 1);
2833                 let funding_signed = match events_3[0] {
2834                         Event::SendFundingCreated { ref node_id, ref msg } => {
2835                                 assert_eq!(*node_id, node_b.node.get_our_node_id());
2836                                 let res = node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), msg).unwrap();
2837                                 let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
2838                                 assert_eq!(added_monitors.len(), 1);
2839                                 assert_eq!(added_monitors[0].0, funding_output);
2840                                 added_monitors.clear();
2841                                 res
2842                         },
2843                         _ => panic!("Unexpected event"),
2844                 };
2845
2846                 node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &funding_signed).unwrap();
2847                 {
2848                         let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
2849                         assert_eq!(added_monitors.len(), 1);
2850                         assert_eq!(added_monitors[0].0, funding_output);
2851                         added_monitors.clear();
2852                 }
2853
2854                 let events_4 = node_a.node.get_and_clear_pending_events();
2855                 assert_eq!(events_4.len(), 1);
2856                 match events_4[0] {
2857                         Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
2858                                 assert_eq!(user_channel_id, 42);
2859                                 assert_eq!(*funding_txo, funding_output);
2860                         },
2861                         _ => panic!("Unexpected event"),
2862                 };
2863
2864                 tx
2865         }
2866
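        // Confirms the funding transaction on both nodes and exchanges funding_locked, returning
        // node_a's (funding_locked, announcement_signatures) pair and the resulting channel_id.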
2867         fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
2868                 confirm_transaction(&node_b.chain_monitor, &tx, tx.version);
2869                 let events_5 = node_b.node.get_and_clear_pending_events();
2870                 assert_eq!(events_5.len(), 1);
2871                 match events_5[0] {
2872                         Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
2873                                 assert_eq!(*node_id, node_a.node.get_our_node_id());
2874                                 assert!(announcement_sigs.is_none());
2875                                 node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap()
2876                         },
2877                         _ => panic!("Unexpected event"),
2878                 };
2879
2880                 let channel_id;
2881
2882                 confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
2883                 let events_6 = node_a.node.get_and_clear_pending_events();
2884                 assert_eq!(events_6.len(), 1);
2885                 (match events_6[0] {
2886                         Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
2887                                 channel_id = msg.channel_id.clone();
2888                                 assert_eq!(*node_id, node_b.node.get_our_node_id());
2889                                 (msg.clone(), announcement_sigs.clone().unwrap())
2890                         },
2891                         _ => panic!("Unexpected event"),
2892                 }, channel_id)
2893         }
2894
2895         fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
2896                 let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
2897                 let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
2898                 (msgs, chan_id, tx)
2899         }
2900
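        // Finishes channel setup by delivering node_a's funding_locked/announcement_signatures to
        // node_b and cross-checking both BroadcastChannelAnnouncement events; returns the
        // channel_announcement and both nodes' channel_updates.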
2901         fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
2902                 let bs_announcement_sigs = {
2903                         let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap();
2904                         node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
2905                         bs_announcement_sigs
2906                 };
2907
2908                 let events_7 = node_b.node.get_and_clear_pending_events();
2909                 assert_eq!(events_7.len(), 1);
2910                 let (announcement, bs_update) = match events_7[0] {
2911                         Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
2912                                 (msg, update_msg)
2913                         },
2914                         _ => panic!("Unexpected event"),
2915                 };
2916
2917                 node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
2918                 let events_8 = node_a.node.get_and_clear_pending_events();
2919                 assert_eq!(events_8.len(), 1);
2920                 let as_update = match events_8[0] {
2921                         Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
2922                                 assert!(*announcement == *msg);
2923                                 update_msg
2924                         },
2925                         _ => panic!("Unexpected event"),
2926                 };
2927
2928                 *node_a.network_chan_count.borrow_mut() += 1;
2929
2930                 ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
2931         }
2932
2933         fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
2934                 create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
2935         }
2936
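        // Opens a channel between nodes[a] and nodes[b] and feeds the resulting announcement and
        // channel_updates into every node's router so the whole test network can route over it.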
2937         fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
2938                 let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
2939                 for node in nodes {
2940                         assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
2941                         node.router.handle_channel_update(&chan_announcement.1).unwrap();
2942                         node.router.handle_channel_update(&chan_announcement.2).unwrap();
2943                 }
2944                 (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
2945         }
2946
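        // Asserts that $tx is a valid spend of (an output of) $spends_tx.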
2947         macro_rules! check_spends {
2948                 ($tx: expr, $spends_tx: expr) => {
2949                         {
2950                                 let mut funding_tx_map = HashMap::new();
2951                                 let spends_tx = $spends_tx;
2952                                 funding_tx_map.insert(spends_tx.txid(), spends_tx);
2953                                 $tx.verify(&funding_tx_map).unwrap();
2954                         }
2955                 }
2956         }
2957
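        // Cooperatively closes the given channel, with close_inbound_first selecting which side
        // drives the shutdown/closing_signed exchange; asserts both sides broadcast identical
        // closing transactions spending the funding tx and returns each side's
        // BroadcastChannelUpdate message.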
2958         fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
2959                 let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) };
2960                 let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
2961                 let (tx_a, tx_b);
2962
2963                 node_a.close_channel(channel_id).unwrap();
2964                 let events_1 = node_a.get_and_clear_pending_events();
2965                 assert_eq!(events_1.len(), 1);
2966                 let shutdown_a = match events_1[0] {
2967                         Event::SendShutdown { ref node_id, ref msg } => {
2968                                 assert_eq!(node_id, &node_b.get_our_node_id());
2969                                 msg.clone()
2970                         },
2971                         _ => panic!("Unexpected event"),
2972                 };
2973
2974                 let (shutdown_b, mut closing_signed_b) = node_b.handle_shutdown(&node_a.get_our_node_id(), &shutdown_a).unwrap();
2975                 if !close_inbound_first {
2976                         assert!(closing_signed_b.is_none());
2977                 }
2978                 let (empty_a, mut closing_signed_a) = node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b.unwrap()).unwrap();
2979                 assert!(empty_a.is_none());
2980                 if close_inbound_first {
2981                         assert!(closing_signed_a.is_none());
2982                         closing_signed_a = node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
2983                         assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
2984                         tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
2985
2986                         let empty_b = node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
2987                         assert!(empty_b.is_none());
2988                         assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
2989                         tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
2990                 } else {
2991                         closing_signed_b = node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
2992                         assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
2993                         tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
2994
2995                         let empty_a2 = node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
2996                         assert!(empty_a2.is_none());
2997                         assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
2998                         tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
2999                 }
3000                 assert_eq!(tx_a, tx_b);
3001                 check_spends!(tx_a, funding_tx);
3002
3003                 let events_2 = node_a.get_and_clear_pending_events();
3004                 assert_eq!(events_2.len(), 1);
3005                 let as_update = match events_2[0] {
3006                         Event::BroadcastChannelUpdate { ref msg } => {
3007                                 msg.clone()
3008                         },
3009                         _ => panic!("Unexpected event"),
3010                 };
3011
3012                 let events_3 = node_b.get_and_clear_pending_events();
3013                 assert_eq!(events_3.len(), 1);
3014                 let bs_update = match events_3[0] {
3015                         Event::BroadcastChannelUpdate { ref msg } => {
3016                                 msg.clone()
3017                         },
3018                         _ => panic!("Unexpected event"),
3019                 };
3020
3021                 (as_update, bs_update)
3022         }
3023
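        // The pieces of an UpdateHTLCs event we care about when relaying an outbound payment: the
        // destination node and its update_add_htlcs plus commitment_signed messages.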
3024         struct SendEvent {
3025                 node_id: PublicKey,
3026                 msgs: Vec<msgs::UpdateAddHTLC>,
3027                 commitment_msg: msgs::CommitmentSigned,
3028         }
3029         impl SendEvent {
3030                 fn from_event(event: Event) -> SendEvent {
3031                         match event {
3032                                 Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
3033                                         assert!(update_fulfill_htlcs.is_empty());
3034                                         assert!(update_fail_htlcs.is_empty());
3035                                         assert!(update_fail_malformed_htlcs.is_empty());
3036                                         assert!(update_fee.is_none());
3037                                         SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed }
3038                                 },
3039                                 _ => panic!("Unexpected event type!"),
3040                         }
3041                 }
3042         }
3043
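        // Asserts that exactly $count channel monitor updates were added on $node, then clears
        // the list so later checks start fresh.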
3044         macro_rules! check_added_monitors {
3045                 ($node: expr, $count: expr) => {
3046                         {
3047                                 let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
3048                                 assert_eq!(added_monitors.len(), $count);
3049                                 added_monitors.clear();
3050                         }
3051                 }
3052         }
3053
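        // Drives a full commitment_signed / revoke_and_ack round trip for $commitment_signed
        // (sent by $node_b and handled by $node_a), checking the expected monitor updates along
        // the way; $fail_backwards adjusts the expected monitor count for the fail-backwards case.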
3054         macro_rules! commitment_signed_dance {
3055                 ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
3056                         {
3057                                 check_added_monitors!($node_a, 0);
3058                                 let (as_revoke_and_ack, as_commitment_signed) = $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
3059                                 check_added_monitors!($node_a, 1);
3060                                 check_added_monitors!($node_b, 0);
3061                                 assert!($node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
3062                                 check_added_monitors!($node_b, 1);
3063                                 let (bs_revoke_and_ack, bs_none) = $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
3064                                 assert!(bs_none.is_none());
3065                                 check_added_monitors!($node_b, 1);
3066                                 if $fail_backwards {
3067                                         assert!($node_a.node.get_and_clear_pending_events().is_empty());
3068                                 }
3069                                 assert!($node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
3070                                 {
3071                                         let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
3072                                         if $fail_backwards {
3073                                                 assert_eq!(added_monitors.len(), 2);
3074                                                 assert!(added_monitors[0].0 != added_monitors[1].0);
3075                                         } else {
3076                                                 assert_eq!(added_monitors.len(), 1);
3077                                         }
3078                                         added_monitors.clear();
3079                                 }
3080                         }
3081                 }
3082         }
3083
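        // Produces a fresh (payment_preimage, payment_hash) pair, using the network-wide payment
        // counter so every test payment gets a unique preimage.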
3084         macro_rules! get_payment_preimage_hash {
3085                 ($node: expr) => {
3086                         {
3087                                 let payment_preimage = [*$node.network_payment_count.borrow(); 32];
3088                                 *$node.network_payment_count.borrow_mut() += 1;
3089                                 let mut payment_hash = [0; 32];
3090                                 let mut sha = Sha256::new();
3091                                 sha.input(&payment_preimage[..]);
3092                                 sha.result(&mut payment_hash);
3093                                 (payment_preimage, payment_hash)
3094                         }
3095                 }
3096         }
3097
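        // Sends a payment for recv_value from origin_node along the given route, manually
        // relaying the HTLC (and doing the commitment dance) at each hop in expected_route and
        // asserting a PaymentReceived event at the final hop; returns the (preimage, hash).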
3098         fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
3099                 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
3100
3101                 let mut payment_event = {
3102                         origin_node.node.send_payment(route, our_payment_hash).unwrap();
3103                         check_added_monitors!(origin_node, 1);
3104
3105                         let mut events = origin_node.node.get_and_clear_pending_events();
3106                         assert_eq!(events.len(), 1);
3107                         SendEvent::from_event(events.remove(0))
3108                 };
3109                 let mut prev_node = origin_node;
3110
3111                 for (idx, &node) in expected_route.iter().enumerate() {
3112                         assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
3113
3114                         node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3115                         check_added_monitors!(node, 0);
3116                         commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
3117
3118                         let events_1 = node.node.get_and_clear_pending_events();
3119                         assert_eq!(events_1.len(), 1);
3120                         match events_1[0] {
3121                                 Event::PendingHTLCsForwardable { .. } => { },
3122                                 _ => panic!("Unexpected event"),
3123                         };
3124
3125                         node.node.channel_state.lock().unwrap().next_forward = Instant::now();
3126                         node.node.process_pending_htlc_forwards();
3127
3128                         let mut events_2 = node.node.get_and_clear_pending_events();
3129                         assert_eq!(events_2.len(), 1);
3130                         if idx == expected_route.len() - 1 {
3131                                 match events_2[0] {
3132                                         Event::PaymentReceived { ref payment_hash, amt } => {
3133                                                 assert_eq!(our_payment_hash, *payment_hash);
3134                                                 assert_eq!(amt, recv_value);
3135                                         },
3136                                         _ => panic!("Unexpected event"),
3137                                 }
3138                         } else {
3139                                 check_added_monitors!(node, 1);
3140                                 payment_event = SendEvent::from_event(events_2.remove(0));
3141                                 assert_eq!(payment_event.msgs.len(), 1);
3142                         }
3143
3144                         prev_node = node;
3145                 }
3146
3147                 (our_payment_preimage, our_payment_hash)
3148         }
3149
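        // Claims our_payment_preimage at the final hop and relays the resulting
        // update_fulfill_htlc backwards along expected_route; if skip_last is set the fulfill is
        // not delivered to origin_node (so no PaymentSent event is expected there).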
3150         fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) {
3151                 assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
3152                 check_added_monitors!(expected_route.last().unwrap(), 1);
3153
3154                 let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
3155                 macro_rules! update_fulfill_dance {
3156                         ($node: expr, $prev_node: expr, $last_node: expr) => {
3157                                 {
3158                                         $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3159                                         if $last_node {
3160                                                 check_added_monitors!($node, 0);
3161                                         } else {
3162                                                 check_added_monitors!($node, 1);
3163                                         }
3164                                         commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
3165                                 }
3166                         }
3167                 }
3168
3169                 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
3170                 let mut prev_node = expected_route.last().unwrap();
3171                 for (idx, node) in expected_route.iter().rev().enumerate() {
3172                         assert_eq!(expected_next_node, node.node.get_our_node_id());
3173                         if next_msgs.is_some() {
3174                                 update_fulfill_dance!(node, prev_node, false);
3175                         }
3176
3177                         let events = node.node.get_and_clear_pending_events();
3178                         if !skip_last || idx != expected_route.len() - 1 {
3179                                 assert_eq!(events.len(), 1);
3180                                 match events[0] {
3181                                         Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
3182                                                 assert!(update_add_htlcs.is_empty());
3183                                                 assert_eq!(update_fulfill_htlcs.len(), 1);
3184                                                 assert!(update_fail_htlcs.is_empty());
3185                                                 assert!(update_fail_malformed_htlcs.is_empty());
3186                                                 assert!(update_fee.is_none());
3187                                                 expected_next_node = node_id.clone();
3188                                                 next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()));
3189                                         },
3190                                         _ => panic!("Unexpected event"),
3191                                 }
3192                         } else {
3193                                 assert!(events.is_empty());
3194                         }
3195                         if !skip_last && idx == expected_route.len() - 1 {
3196                                 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
3197                         }
3198
3199                         prev_node = node;
3200                 }
3201
3202                 if !skip_last {
3203                         update_fulfill_dance!(origin_node, expected_route.first().unwrap(), true);
3204                         let events = origin_node.node.get_and_clear_pending_events();
3205                         assert_eq!(events.len(), 1);
3206                         match events[0] {
3207                                 Event::PaymentSent { payment_preimage } => {
3208                                         assert_eq!(payment_preimage, our_payment_preimage);
3209                                 },
3210                                 _ => panic!("Unexpected event"),
3211                         }
3212                 }
3213         }
3214
3215         fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) {
3216                 claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage);
3217         }
3218
3219         const TEST_FINAL_CLTV: u32 = 32;
3220
3221         fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
3222                 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
3223                 assert_eq!(route.hops.len(), expected_route.len());
3224                 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
3225                         assert_eq!(hop.pubkey, node.node.get_our_node_id());
3226                 }
3227
3228                 send_along_route(origin_node, route, expected_route, recv_value)
3229         }
3230
3231         fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
3232                 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
3233                 assert_eq!(route.hops.len(), expected_route.len());
3234                 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
3235                         assert_eq!(hop.pubkey, node.node.get_our_node_id());
3236                 }
3237
3238                 let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
3239
3240                 let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
3241                 match err {
3242                         APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
3243                         _ => panic!("Unknown error variants"),
3244                 };
3245         }
3246
3247         fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
3248                 let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
3249                 claim_payment(&origin, expected_route, our_payment_preimage);
3250         }
3251
3252         fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) {
3253                 assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
3254                 check_added_monitors!(expected_route.last().unwrap(), 1);
3255
3256                 let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
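                // Helper: deliver the pending update_fail_htlc from $prev_node to $node, then run the
                // commitment_signed dance; whether we are treated as the "last node" changes how the dance
                // is driven (see the comment in the loop below).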
3257                 macro_rules! update_fail_dance {
3258                         ($node: expr, $prev_node: expr, $last_node: expr) => {
3259                                 {
3260                                         $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3261                                         commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
3262                                 }
3263                         }
3264                 }
3265
3266                 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
3267                 let mut prev_node = expected_route.last().unwrap();
3268                 for (idx, node) in expected_route.iter().rev().enumerate() {
3269                         assert_eq!(expected_next_node, node.node.get_our_node_id());
3270                         if next_msgs.is_some() {
3271                                 // We may be the "last node" for the purpose of the commitment dance if we're
3272                                 // skipping the last node (implying it is disconnected) and we're the
3273                                 // second-to-last node!
3274                                 update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
3275                         }
3276
3277                         let events = node.node.get_and_clear_pending_events();
3278                         if !skip_last || idx != expected_route.len() - 1 {
3279                                 assert_eq!(events.len(), 1);
3280                                 match events[0] {
3281                                         Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
3282                                                 assert!(update_add_htlcs.is_empty());
3283                                                 assert!(update_fulfill_htlcs.is_empty());
3284                                                 assert_eq!(update_fail_htlcs.len(), 1);
3285                                                 assert!(update_fail_malformed_htlcs.is_empty());
3286                                                 assert!(update_fee.is_none());
3287                                                 expected_next_node = node_id.clone();
3288                                                 next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
3289                                         },
3290                                         _ => panic!("Unexpected event"),
3291                                 }
3292                         } else {
3293                                 assert!(events.is_empty());
3294                         }
3295                         if !skip_last && idx == expected_route.len() - 1 {
3296                                 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
3297                         }
3298
3299                         prev_node = node;
3300                 }
3301
3302                 if !skip_last {
3303                         update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
3304
3305                         let events = origin_node.node.get_and_clear_pending_events();
3306                         assert_eq!(events.len(), 1);
3307                         match events[0] {
3308                                 Event::PaymentFailed { payment_hash, rejected_by_dest } => {
3309                                         assert_eq!(payment_hash, our_payment_hash);
3310                                         assert!(rejected_by_dest);
3311                                 },
3312                                 _ => panic!("Unexpected event"),
3313                         }
3314                 }
3315         }
3316
3317         fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) {
3318                 fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
3319         }
3320
3321         fn create_network(node_count: usize) -> Vec<Node> {
3322                 let mut nodes = Vec::new();
3323                 let mut rng = thread_rng();
3324                 let secp_ctx = Secp256k1::new();
3325                 let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
3326
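                // The channel and payment counters are shared (via Rc<RefCell<..>>) by every Node created
                // below, so helpers that open channels or generate payment preimages draw from network-wide
                // state (tests like duplicate_htlc_test rewind the payment counter on purpose to reuse a
                // preimage).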
3327                 let chan_count = Rc::new(RefCell::new(0));
3328                 let payment_count = Rc::new(RefCell::new(0));
3329
3330                 for _ in 0..node_count {
3331                         let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
3332                         let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
3333                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
3334                         let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
3335                         let node_id = {
3336                                 let mut key_slice = [0; 32];
3337                                 rng.fill_bytes(&mut key_slice);
3338                                 SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
3339                         };
3340                         let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap();
3341                         let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id), chain_monitor.clone(), Arc::clone(&logger));
3342                         nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router,
3343                                 network_payment_count: payment_count.clone(),
3344                                 network_chan_count: chan_count.clone(),
3345                         });
3346                 }
3347
3348                 nodes
3349         }
3350
3351         #[test]
3352         fn test_async_inbound_update_fee() {
3353                 let mut nodes = create_network(2);
3354                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3355                 let channel_id = chan.2;
3356
3357                 macro_rules! get_feerate {
3358                         ($node: expr) => {{
3359                                 let chan_lock = $node.node.channel_state.lock().unwrap();
3360                                 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3361                                 chan.get_feerate()
3362                         }}
3363                 }
3364
3365                 // balancing
3366                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
3367
3368                 // A                                        B
3369                 // update_fee                            ->
3370                 // send (1) commitment_signed            -.
3371                 //                                       <- update_add_htlc/commitment_signed
3372                 // send (2) RAA (awaiting remote revoke) -.
3373                 // (1) commitment_signed is delivered    ->
3374                 //                                       .- send (3) RAA (awaiting remote revoke)
3375                 // (2) RAA is delivered                  ->
3376                 //                                       .- send (4) commitment_signed
3377                 //                                       <- (3) RAA is delivered
3378                 // send (5) commitment_signed            -.
3379                 //                                       <- (4) commitment_signed is delivered
3380                 // send (6) RAA                          -.
3381                 // (5) commitment_signed is delivered    ->
3382                 //                                       <- RAA
3383                 // (6) RAA is delivered                  ->
3384
3385                 // First nodes[0] generates an update_fee
3386                 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
3387                 check_added_monitors!(nodes[0], 1);
3388
3389                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3390                 assert_eq!(events_0.len(), 1);
3391                 let (update_msg, commitment_signed) = match events_0[0] { // (1)
3392                         Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
3393                                 (update_fee.as_ref(), commitment_signed)
3394                         },
3395                         _ => panic!("Unexpected event"),
3396                 };
3397
3398                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3399
3400                 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
3401                 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
3402                 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
3403                 check_added_monitors!(nodes[1], 1);
3404
3405                 let payment_event = {
3406                         let mut events_1 = nodes[1].node.get_and_clear_pending_events();
3407                         assert_eq!(events_1.len(), 1);
3408                         SendEvent::from_event(events_1.remove(0))
3409                 };
3410                 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
3411                 assert_eq!(payment_event.msgs.len(), 1);
3412
3413                 // ...now when the messages get delivered everyone should be happy
3414                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3415                 let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
3416                 assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack
3417                 check_added_monitors!(nodes[0], 1);
3418
3419                 // deliver (1), generate (3):
3420                 let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3421                 assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting nodes[0] revoke_and_ack
3422                 check_added_monitors!(nodes[1], 1);
3423
3424                 let bs_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2)
3425                 assert!(bs_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (4)
3426                 assert!(bs_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (4)
3427                 assert!(bs_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (4)
3428                 assert!(bs_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (4)
3429                 assert!(bs_update.as_ref().unwrap().update_fee.is_none()); // (4)
3430                 check_added_monitors!(nodes[1], 1);
3431
3432                 let as_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); // deliver (3)
3433                 assert!(as_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (5)
3434                 assert!(as_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (5)
3435                 assert!(as_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (5)
3436                 assert!(as_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (5)
3437                 assert!(as_update.as_ref().unwrap().update_fee.is_none()); // (5)
3438                 check_added_monitors!(nodes[0], 1);
3439
3440                 let (as_second_revoke, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.unwrap().commitment_signed).unwrap(); // deliver (4)
3441                 assert!(as_second_commitment_signed.is_none()); // only (6)
3442                 check_added_monitors!(nodes[0], 1);
3443
3444                 let (bs_second_revoke, bs_second_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.unwrap().commitment_signed).unwrap(); // deliver (5)
3445                 assert!(bs_second_commitment_signed.is_none());
3446                 check_added_monitors!(nodes[1], 1);
3447
3448                 assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none());
3449                 check_added_monitors!(nodes[0], 1);
3450
3451                 let events_2 = nodes[0].node.get_and_clear_pending_events();
3452                 assert_eq!(events_2.len(), 1);
3453                 match events_2[0] {
3454                         Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
3455                         _ => panic!("Unexpected event"),
3456                 }
3457
3458                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); // deliver (6)
3459                 check_added_monitors!(nodes[1], 1);
3460         }
3461
3462         #[test]
3463         fn test_update_fee_unordered_raa() {
3464                 // Just the intro to the previous test followed by an out-of-order RAA (which caused a
3465                 // crash in an earlier version of the update_fee patch)
3466                 let mut nodes = create_network(2);
3467                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3468                 let channel_id = chan.2;
3469
3470                 macro_rules! get_feerate {
3471                         ($node: expr) => {{
3472                                 let chan_lock = $node.node.channel_state.lock().unwrap();
3473                                 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3474                                 chan.get_feerate()
3475                         }}
3476                 }
3477
3478                 // balancing
3479                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
3480
3481                 // First nodes[0] generates an update_fee
3482                 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
3483                 check_added_monitors!(nodes[0], 1);
3484
3485                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3486                 assert_eq!(events_0.len(), 1);
3487                 let update_msg = match events_0[0] { // (1)
3488                         Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
3489                                 update_fee.as_ref()
3490                         },
3491                         _ => panic!("Unexpected event"),
3492                 };
3493
3494                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3495
3496                 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
3497                 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
3498                 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
3499                 check_added_monitors!(nodes[1], 1);
3500
3501                 let payment_event = {
3502                         let mut events_1 = nodes[1].node.get_and_clear_pending_events();
3503                         assert_eq!(events_1.len(), 1);
3504                         SendEvent::from_event(events_1.remove(0))
3505                 };
3506                 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
3507                 assert_eq!(payment_event.msgs.len(), 1);
3508
3509                 // ...now when the messages get delivered everyone should be happy
3510                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3511                 let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
3512                 assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack
3513                 check_added_monitors!(nodes[0], 1);
3514
3515                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); // deliver (2)
3516                 check_added_monitors!(nodes[1], 1);
3517
3518                 // We can't continue, sadly, because our (1) now has a bogus signature
3519         }
3520
3521         #[test]
3522         fn test_multi_flight_update_fee() {
3523                 let nodes = create_network(2);
3524                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3525                 let channel_id = chan.2;
3526
3527                 macro_rules! get_feerate {
3528                         ($node: expr) => {{
3529                                 let chan_lock = $node.node.channel_state.lock().unwrap();
3530                                 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3531                                 chan.get_feerate()
3532                         }}
3533                 }
3534
3535                 // A                                        B
3536                 // update_fee/commitment_signed          ->
3537                 //                                       .- send (1) RAA and (2) commitment_signed
3538                 // update_fee (never committed)          ->
3539                 // (3) update_fee                        ->
3540                 // We have to manually generate the above update_fee: it is allowed by the protocol, but we
3541                 // don't track which updates correspond to which revoke_and_ack responses, so we're in
3542                 // AwaitingRAA mode and will not generate the update_fee yet.
3543                 //                                       <- (1) RAA delivered
3544                 // (3) is generated and send (4) CS      -.
3545                 // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
3546                 // know the per_commitment_point to use for it.
3547                 //                                       <- (2) commitment_signed delivered
3548                 // revoke_and_ack                        ->
3549                 //                                          B should send no response here
3550                 // (4) commitment_signed delivered       ->
3551                 //                                       <- RAA/commitment_signed delivered
3552                 // revoke_and_ack                        ->
3553
3554                 // First nodes[0] generates an update_fee
3555                 let initial_feerate = get_feerate!(nodes[0]);
3556                 nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
3557                 check_added_monitors!(nodes[0], 1);
3558
3559                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3560                 assert_eq!(events_0.len(), 1);
3561                 let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
3562                         Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
3563                                 (update_fee.as_ref().unwrap(), commitment_signed)
3564                         },
3565                         _ => panic!("Unexpected event"),
3566                 };
3567
3568                 // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
3569                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
3570                 let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
3571                 check_added_monitors!(nodes[1], 1);
3572
3573                 // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
3574                 // transaction:
3575                 nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
3576                 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
3577
3578                 // Manually create the (3) update_fee message that nodes[0] will generate, before it actually does so...
3579                 let mut update_msg_2 = msgs::UpdateFee {
3580                         channel_id: update_msg_1.channel_id.clone(),
3581                         feerate_per_kw: (initial_feerate + 30) as u32,
3582                 };
3583
3584                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
3585
3586                 update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
3587                 // Deliver (3)
3588                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
3589
3590                 // Deliver (1), generating (3) and (4)
3591                 let as_second_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
3592                 check_added_monitors!(nodes[0], 1);
3593                 assert!(as_second_update.as_ref().unwrap().update_add_htlcs.is_empty());
3594                 assert!(as_second_update.as_ref().unwrap().update_fulfill_htlcs.is_empty());
3595                 assert!(as_second_update.as_ref().unwrap().update_fail_htlcs.is_empty());
3596                 assert!(as_second_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
3597                 // Check that the newly-generated update_fee matches what we delivered:
3598                 assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
3599                 assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
3600
3601                 // Deliver (2) commitment_signed
3602                 let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), bs_commitment_signed.as_ref().unwrap()).unwrap();
3603                 check_added_monitors!(nodes[0], 1);
3604                 assert!(as_commitment_signed.is_none());
3605
3606                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none());
3607                 check_added_monitors!(nodes[1], 1);
3608
3609                 // Deliver (4)
3610                 let (bs_second_revoke, bs_second_commitment) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.unwrap().commitment_signed).unwrap();
3611                 check_added_monitors!(nodes[1], 1);
3612
3613                 assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none());
3614                 check_added_monitors!(nodes[0], 1);
3615
3616                 let (as_second_revoke, as_second_commitment) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment.unwrap()).unwrap();
3617                 assert!(as_second_commitment.is_none());
3618                 check_added_monitors!(nodes[0], 1);
3619
3620                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none());
3621                 check_added_monitors!(nodes[1], 1);
3622         }
3623
3624         #[test]
3625         fn test_update_fee_vanilla() {
3626                 let nodes = create_network(2);
3627                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3628                 let channel_id = chan.2;
3629
3630                 macro_rules! get_feerate {
3631                         ($node: expr) => {{
3632                                 let chan_lock = $node.node.channel_state.lock().unwrap();
3633                                 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3634                                 chan.get_feerate()
3635                         }}
3636                 }
3637
3638                 let feerate = get_feerate!(nodes[0]);
3639                 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
3640
3641                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3642                 assert_eq!(events_0.len(), 1);
3643                 let (update_msg, commitment_signed) = match events_0[0] {
3644                         Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3645                                 (update_fee.as_ref(), commitment_signed)
3646                         },
3647                         _ => panic!("Unexpected event"),
3648                 };
3649                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3650
3651                 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3652                 let commitment_signed = commitment_signed.unwrap();
3653                 check_added_monitors!(nodes[0], 1);
3654                 check_added_monitors!(nodes[1], 1);
3655
3656                 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3657                 assert!(resp_option.is_none());
3658                 check_added_monitors!(nodes[0], 1);
3659
3660                 let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
3661                 assert!(commitment_signed.is_none());
3662                 check_added_monitors!(nodes[0], 1);
3663
3664                 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
3665                 assert!(resp_option.is_none());
3666                 check_added_monitors!(nodes[1], 1);
3667         }
3668
3669         #[test]
3670         fn test_update_fee_with_fundee_update_add_htlc() {
3671                 let mut nodes = create_network(2);
3672                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3673                 let channel_id = chan.2;
3674
3675                 macro_rules! get_feerate {
3676                         ($node: expr) => {{
3677                                 let chan_lock = $node.node.channel_state.lock().unwrap();
3678                                 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3679                                 chan.get_feerate()
3680                         }}
3681                 }
3682
3683                 // balancing
3684                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
3685
3686                 let feerate = get_feerate!(nodes[0]);
3687                 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
3688
3689                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3690                 assert_eq!(events_0.len(), 1);
3691                 let (update_msg, commitment_signed) = match events_0[0] {
3692                         Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3693                                 (update_fee.as_ref(), commitment_signed)
3694                         },
3695                         _ => panic!("Unexpected event"),
3696                 };
3697                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3698                 check_added_monitors!(nodes[0], 1);
3699                 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3700                 let commitment_signed = commitment_signed.unwrap();
3701                 check_added_monitors!(nodes[1], 1);
3702
3703                 let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap();
3704
3705                 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]);
3706
3707                 // nothing happens since nodes[1] is in AwaitingRemoteRevoke
3708                 nodes[1].node.send_payment(route, our_payment_hash).unwrap();
3709                 {
3710                         let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
3711                         assert_eq!(added_monitors.len(), 0);
3712                         added_monitors.clear();
3713                 }
3714                 let events = nodes[0].node.get_and_clear_pending_events();
3715                 assert_eq!(events.len(), 0);
3716                 // nodes[1] has nothing to do
3717
3718                 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3719                 assert!(resp_option.is_none());
3720                 check_added_monitors!(nodes[0], 1);
3721
3722                 let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
3723                 assert!(commitment_signed.is_none());
3724                 check_added_monitors!(nodes[0], 1);
3725                 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
3726                 // AwaitingRemoteRevoke ends here
3727
3728                 let commitment_update = resp_option.unwrap();
3729                 assert_eq!(commitment_update.update_add_htlcs.len(), 1);
3730                 assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
3731                 assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
3732                 assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
3733                 assert_eq!(commitment_update.update_fee.is_none(), true);
3734
3735                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap();
3736                 let (revoke, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
3737                 check_added_monitors!(nodes[0], 1);
3738                 check_added_monitors!(nodes[1], 1);
3739                 let commitment_signed = commitment_signed.unwrap();
3740                 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap();
3741                 check_added_monitors!(nodes[1], 1);
3742                 assert!(resp_option.is_none());
3743
3744                 let (revoke, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap();
3745                 check_added_monitors!(nodes[1], 1);
3746                 assert!(commitment_signed.is_none());
3747                 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap();
3748                 check_added_monitors!(nodes[0], 1);
3749                 assert!(resp_option.is_none());
3750
3751                 let events = nodes[0].node.get_and_clear_pending_events();
3752                 assert_eq!(events.len(), 1);
3753                 match events[0] {
3754                         Event::PendingHTLCsForwardable { .. } => { },
3755                         _ => panic!("Unexpected event"),
3756                 };
3757                 nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
3758                 nodes[0].node.process_pending_htlc_forwards();
3759
3760                 let events = nodes[0].node.get_and_clear_pending_events();
3761                 assert_eq!(events.len(), 1);
3762                 match events[0] {
3763                         Event::PaymentReceived { .. } => { },
3764                         _ => panic!("Unexpected event"),
3765                 };
3766
3767                 claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
3768
3769                 send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
3770                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
3771                 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
3772         }
3773
3774         #[test]
3775         fn test_update_fee() {
3776                 let nodes = create_network(2);
3777                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3778                 let channel_id = chan.2;
3779
3780                 macro_rules! get_feerate {
3781                         ($node: expr) => {{
3782                                 let chan_lock = $node.node.channel_state.lock().unwrap();
3783                                 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3784                                 chan.get_feerate()
3785                         }}
3786                 }
3787
3788                 // A                                        B
3789                 // (1) update_fee/commitment_signed      ->
3790                 //                                       <- (2) revoke_and_ack
3791                 //                                       .- send (3) commitment_signed
3792                 // (4) update_fee/commitment_signed      ->
3793                 //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
3794                 //                                       <- (3) commitment_signed delivered
3795                 // send (6) revoke_and_ack               -.
3796                 //                                       <- (5) deliver revoke_and_ack
3797                 // (6) deliver revoke_and_ack            ->
3798                 //                                       .- send (7) commitment_signed in response to (4)
3799                 //                                       <- (7) deliver commitment_signed
3800                 // revoke_and_ack                        ->
3801
3802                 // Create and deliver (1)...
3803                 let feerate = get_feerate!(nodes[0]);
3804                 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
3805
3806                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3807                 assert_eq!(events_0.len(), 1);
3808                 let (update_msg, commitment_signed) = match events_0[0] {
3809                         Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3810                                 (update_fee.as_ref(), commitment_signed)
3811                         },
3812                         _ => panic!("Unexpected event"),
3813                 };
3814                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3815
3816                 // Generate (2) and (3):
3817                 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3818                 let commitment_signed_0 = commitment_signed.unwrap();
3819                 check_added_monitors!(nodes[0], 1);
3820                 check_added_monitors!(nodes[1], 1);
3821
3822                 // Deliver (2):
3823                 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3824                 assert!(resp_option.is_none());
3825                 check_added_monitors!(nodes[0], 1);
3826
3827                 // Create and deliver (4)...
3828                 nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
3829                 let events_0 = nodes[0].node.get_and_clear_pending_events();
3830                 assert_eq!(events_0.len(), 1);
3831                 let (update_msg, commitment_signed) = match events_0[0] {
3832                         Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3833                                 (update_fee.as_ref(), commitment_signed)
3834                         },
3835                         _ => panic!("Unexpected event"),
3836                 };
3837                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3838
3839                 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3840                 // ... creating (5)
3841                 assert!(commitment_signed.is_none());
3842                 check_added_monitors!(nodes[0], 1);
3843                 check_added_monitors!(nodes[1], 1);
3844
3845                 // Handle (3), creating (6):
3846                 let (revoke_msg_0, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
3847                 assert!(commitment_signed.is_none());
3848                 check_added_monitors!(nodes[0], 1);
3849
3850                 // Deliver (5):
3851                 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3852                 assert!(resp_option.is_none());
3853                 check_added_monitors!(nodes[0], 1);
3854
3855                 // Deliver (6), creating (7):
3856                 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
3857                 let commitment_signed = resp_option.unwrap().commitment_signed;
3858                 check_added_monitors!(nodes[1], 1);
3859
3860                 // Deliver (7)
3861                 let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
3862                 assert!(commitment_signed.is_none());
3863                 check_added_monitors!(nodes[0], 1);
3864                 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
3865                 assert!(resp_option.is_none());
3866                 check_added_monitors!(nodes[1], 1);
3867
3868                 assert_eq!(get_feerate!(nodes[0]), feerate + 30);
3869                 assert_eq!(get_feerate!(nodes[1]), feerate + 30);
3870                 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
3871         }
3872
3873         #[test]
3874         fn fake_network_test() {
3875                 // Simple test which builds a network of ChannelManagers, connects them to each other, and
3876                 // tests that payments get routed and transactions broadcast in semi-reasonable ways.
3877                 let nodes = create_network(4);
3878
3879                 // Create some initial channels
3880                 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3881                 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3882                 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
3883
3884                 // Rebalance the network a bit by relaying one payment through all the channels...
3885                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3886                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3887                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3888                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3889
3890                 // Send some more payments
3891                 send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
3892                 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
3893                 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
3894
3895                 // Test failure packets
3896                 let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
3897                 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
3898
3899                 // Add a new channel between nodes[1] and nodes[3], skipping nodes[2]
3900                 let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
3901
3902                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
3903                 send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
3904                 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3905                 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3906                 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3907                 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3908                 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3909
3910                 // Do some rebalance loop payments, simultaneously
3911                 let mut hops = Vec::with_capacity(3);
3912                 hops.push(RouteHop {
3913                         pubkey: nodes[2].node.get_our_node_id(),
3914                         short_channel_id: chan_2.0.contents.short_channel_id,
3915                         fee_msat: 0,
3916                         cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
3917                 });
3918                 hops.push(RouteHop {
3919                         pubkey: nodes[3].node.get_our_node_id(),
3920                         short_channel_id: chan_3.0.contents.short_channel_id,
3921                         fee_msat: 0,
3922                         cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
3923                 });
3924                 hops.push(RouteHop {
3925                         pubkey: nodes[1].node.get_our_node_id(),
3926                         short_channel_id: chan_4.0.contents.short_channel_id,
3927                         fee_msat: 1000000,
3928                         cltv_expiry_delta: TEST_FINAL_CLTV,
3929                 });
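                // Back-fill the per-hop fees using the usual base + proportional formula: each hop charges
                // fee_base_msat + amount_forwarded * fee_proportional_millionths / 1_000_000 on the amount
                // it forwards to the next hop.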
3930                 hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
3931                 hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
3932                 let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
3933
3934                 let mut hops = Vec::with_capacity(3);
3935                 hops.push(RouteHop {
3936                         pubkey: nodes[3].node.get_our_node_id(),
3937                         short_channel_id: chan_4.0.contents.short_channel_id,
3938                         fee_msat: 0,
3939                         cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
3940                 });
3941                 hops.push(RouteHop {
3942                         pubkey: nodes[2].node.get_our_node_id(),
3943                         short_channel_id: chan_3.0.contents.short_channel_id,
3944                         fee_msat: 0,
3945                         cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
3946                 });
3947                 hops.push(RouteHop {
3948                         pubkey: nodes[1].node.get_our_node_id(),
3949                         short_channel_id: chan_2.0.contents.short_channel_id,
3950                         fee_msat: 1000000,
3951                         cltv_expiry_delta: TEST_FINAL_CLTV,
3952                 });
3953                 hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
3954                 hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
3955                 let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
3956
3957                 // Claim the rebalances...
3958                 fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
3959                 claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
3960
3961                 // Add a second channel between nodes[1] and nodes[3], duplicating chan_4
3962                 let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
3963
3964                 // Send some payments across both channels
3965                 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
3966                 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
3967                 let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
3968
3969                 route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
3970
3971                 //TODO: Test that routes work again here as we've been notified that the channel is full
3972
3973                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
3974                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
3975                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
3976
3977                 // Close down the channels...
3978                 close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
3979                 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
3980                 close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
3981                 close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
3982                 close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
3983         }
3984
3985         #[test]
3986         fn duplicate_htlc_test() {
3987                 // Test that we accept duplicate payment_hash HTLCs across the network and that
3988                 // claiming/failing them are all separate and don't affect each other
3989                 let mut nodes = create_network(6);
3990
3991                 // Create some initial channels to route via 3 to 4/5 from 0/1/2
3992                 create_announced_chan_between_nodes(&nodes, 0, 3);
3993                 create_announced_chan_between_nodes(&nodes, 1, 3);
3994                 create_announced_chan_between_nodes(&nodes, 2, 3);
3995                 create_announced_chan_between_nodes(&nodes, 3, 4);
3996                 create_announced_chan_between_nodes(&nodes, 3, 5);
3997
3998                 let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
3999
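                // Roll the shared payment counter back so the next route_payment re-derives the same
                // preimage/hash, giving us duplicate-payment_hash HTLCs over different routes.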
4000                 *nodes[0].network_payment_count.borrow_mut() -= 1;
4001                 assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
4002
4003                 *nodes[0].network_payment_count.borrow_mut() -= 1;
4004                 assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
4005
4006                 claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
4007                 fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
4008                 claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
4009         }
4010
4011         #[derive(PartialEq)]
4012         enum HTLCType { NONE, TIMEOUT, SUCCESS }
4013         /// Tests that the given node has broadcast transactions for the given Channel
4014         ///
4015         /// First checks that the latest local commitment tx has been broadcast, unless an explicit
4016         /// commitment_tx is provided, which may be used to test that a remote commitment tx was
4017         /// broadcast and the revoked outputs were claimed.
4018         ///
4019         /// Next tests that there is (or is not) a transaction that spends the commitment transaction
4020         /// that appears to be the type of HTLC transaction specified in has_htlc_tx.
4021         ///
4022         /// All broadcast transactions must be accounted for in one of the above three types or we'll
4023         /// also fail.
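        ///
        /// A typical (hypothetical) invocation after a force-close might look like
        /// `test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::TIMEOUT)`, checking that nodes[1]
        /// broadcast its latest local commitment tx plus an HTLC-timeout transaction spending it.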
4024         fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
4025                 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
4026                 assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
4027
4028                 let mut res = Vec::with_capacity(2);
4029                 node_txn.retain(|tx| {
4030                         if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
4031                                 check_spends!(tx, chan.3.clone());
4032                                 if commitment_tx.is_none() {
4033                                         res.push(tx.clone());
4034                                 }
4035                                 false
4036                         } else { true }
4037                 });
4038                 if let Some(explicit_tx) = commitment_tx {
4039                         res.push(explicit_tx.clone());
4040                 }
4041
4042                 assert_eq!(res.len(), 1);
4043
4044                 if has_htlc_tx != HTLCType::NONE {
4045                         node_txn.retain(|tx| {
4046                                 if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
4047                                         check_spends!(tx, res[0].clone());
4048                                         if has_htlc_tx == HTLCType::TIMEOUT {
4049                                                 assert!(tx.lock_time != 0);
4050                                         } else {
4051                                                 assert!(tx.lock_time == 0);
4052                                         }
4053                                         res.push(tx.clone());
4054                                         false
4055                                 } else { true }
4056                         });
4057                         assert_eq!(res.len(), 2);
4058                 }
4059
4060                 assert!(node_txn.is_empty());
4061                 res
4062         }
4063
4064         /// Tests that the given node has broadcast a claim transaction against the provided revoked
4065         /// HTLC transaction.
4066         fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
4067                 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
4068                 assert_eq!(node_txn.len(), 1);
4069                 node_txn.retain(|tx| {
4070                         if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
4071                                 check_spends!(tx, revoked_tx.clone());
4072                                 false
4073                         } else { true }
4074                 });
4075                 assert!(node_txn.is_empty());
4076         }
4077
4078         fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
4079                 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
4080
4081                 assert!(node_txn.len() >= 1);
4082                 assert_eq!(node_txn[0].input.len(), 1);
4083                 let mut found_prev = false;
4084
4085                 for tx in prev_txn {
4086                         if node_txn[0].input[0].previous_output.txid == tx.txid() {
4087                                 check_spends!(node_txn[0], tx.clone());
4088                                 assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
4089                                 assert_eq!(tx.input.len(), 1); // must spend a commitment tx
4090
4091                                 found_prev = true;
4092                                 break;
4093                         }
4094                 }
4095                 assert!(found_prev);
4096
4097                 let mut res = Vec::new();
4098                 mem::swap(&mut *node_txn, &mut res);
4099                 res
4100         }
4101
4102         fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
4103                 let events_1 = nodes[a].node.get_and_clear_pending_events();
4104                 assert_eq!(events_1.len(), 1);
4105                 let as_update = match events_1[0] {
4106                         Event::BroadcastChannelUpdate { ref msg } => {
4107                                 msg.clone()
4108                         },
4109                         _ => panic!("Unexpected event"),
4110                 };
4111
4112                 let events_2 = nodes[b].node.get_and_clear_pending_events();
4113                 assert_eq!(events_2.len(), 1);
4114                 let bs_update = match events_2[0] {
4115                         Event::BroadcastChannelUpdate { ref msg } => {
4116                                 msg.clone()
4117                         },
4118                         _ => panic!("Unexpected event"),
4119                 };
4120
4121                 for node in nodes {
4122                         node.router.handle_channel_update(&as_update).unwrap();
4123                         node.router.handle_channel_update(&bs_update).unwrap();
4124                 }
4125         }
4126
4127         #[test]
4128         fn channel_reserve_test() {
4129                 use util::rng;
4130                 use std::sync::atomic::Ordering;
4131                 use ln::msgs::HandleError;
4132
4133                 macro_rules! get_channel_value_stat {
4134                         ($node: expr, $channel_id: expr) => {{
4135                                 let chan_lock = $node.node.channel_state.lock().unwrap();
4136                                 let chan = chan_lock.by_id.get(&$channel_id).unwrap();
4137                                 chan.get_value_stat()
4138                         }}
4139                 }
4140
4141                 let mut nodes = create_network(3);
4142                 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
4143                 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
4144
4145                 let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
4146                 let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
4147
4148                 let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
4149                 let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
4150
4151                 macro_rules! get_route_and_payment_hash {
4152                         ($recv_value: expr) => {{
4153                                 let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
4154                                 let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
4155                                 (route, payment_hash, payment_preimage)
4156                         }}
4157                 };
4158
4159                 macro_rules! expect_pending_htlcs_forwardable {
4160                         ($node: expr) => {{
4161                                 let events = $node.node.get_and_clear_pending_events();
4162                                 assert_eq!(events.len(), 1);
4163                                 match events[0] {
4164                                         Event::PendingHTLCsForwardable { .. } => { },
4165                                         _ => panic!("Unexpected event"),
4166                                 };
4167                                 $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
4168                                 $node.node.process_pending_htlc_forwards();
4169                         }}
4170                 };
4171
4172                 macro_rules! expect_forward {
4173                         ($node: expr) => {{
4174                                 let mut events = $node.node.get_and_clear_pending_events();
4175                                 assert_eq!(events.len(), 1);
4176                                 check_added_monitors!($node, 1);
4177                                 let payment_event = SendEvent::from_event(events.remove(0));
4178                                 payment_event
4179                         }}
4180                 }
4181
4182                 macro_rules! expect_payment_received {
4183                         ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
4184                                 let events = $node.node.get_and_clear_pending_events();
4185                                 assert_eq!(events.len(), 1);
4186                                 match events[0] {
4187                                         Event::PaymentReceived { ref payment_hash, amt } => {
4188                                                 assert_eq!($expected_payment_hash, *payment_hash);
4189                                                 assert_eq!($expected_recv_value, amt);
4190                                         },
4191                                         _ => panic!("Unexpected event"),
4192                                 }
4193                         }
4194                 };
4195
4196                 let feemsat = 239; // per-hop routing fee in msat (asserted against the route's hop fees below)
4197                 let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
4198
4199                 let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
4200
4201                 // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
4202                 {
4203                         let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
4204                         assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
4205                         let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
4206                         match err {
4207                                 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
4208                                 _ => panic!("Unknown error variants"),
4209                         }
4210                 }
4211
4212                 let mut htlc_id = 0;
4213                 // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
4214                 // nodes[0]'s wealth
4215                 loop {
4216                         let amt_msat = recv_value_0 + total_fee_msat;
4217                         if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
4218                                 break;
4219                         }
4220                         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
4221                         htlc_id += 1;
4222
4223                         let (stat01_, stat11_, stat12_, stat22_) = (
4224                                 get_channel_value_stat!(nodes[0], chan_1.2),
4225                                 get_channel_value_stat!(nodes[1], chan_1.2),
4226                                 get_channel_value_stat!(nodes[1], chan_2.2),
4227                                 get_channel_value_stat!(nodes[2], chan_2.2),
4228                         );
4229
4230                         assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
4231                         assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
4232                         assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
4233                         assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
4234                         stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
4235                 }
4236
4237                 {
4238                         let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
4239                         // attempt to trigger a channel_reserve violation
4240                         let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
4241                         let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
4242                         match err {
4243                                 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
4244                                 _ => panic!("Unknown error variants"),
4245                         }
4246                 }
4247
4248                 // add a pending HTLC for roughly half of the remaining spendable balance
4249                 let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
4250                 let amt_msat_1 = recv_value_1 + total_fee_msat;
4251
4252                 let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
4253                 let payment_event_1 = {
4254                         nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
4255                         check_added_monitors!(nodes[0], 1);
4256
4257                         let mut events = nodes[0].node.get_and_clear_pending_events();
4258                         assert_eq!(events.len(), 1);
4259                         SendEvent::from_event(events.remove(0))
4260                 };
4261                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
4262
4263                 // channel reserve test with htlc pending output > 0
4264                 let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
4265                 {
4266                         let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
4267                         match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
4268                                 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
4269                                 _ => panic!("Unknown error variants"),
4270                         }
4271                 }
4272
4273                 {
4274                         // test the channel_reserve check on nodes[1]'s side
4275                         let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
4276
4277                         // Need to manually create the update_add_htlc message to get around the channel reserve check in send_htlc()
4278                         let secp_ctx = Secp256k1::new();
4279                         let session_priv = SecretKey::from_slice(&secp_ctx, &{
4280                                 let mut session_key = [0; 32];
4281                                 rng::fill_bytes(&mut session_key);
4282                                 session_key
4283                         }).expect("RNG is bad!");
4284
4285                         let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
4286                         let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
4287                         let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
4288                         let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
4289                         let msg = msgs::UpdateAddHTLC {
4290                                 channel_id: chan_1.2,
4291                                 htlc_id,
4292                                 amount_msat: htlc_msat,
4293                                 payment_hash: our_payment_hash,
4294                                 cltv_expiry: htlc_cltv,
4295                                 onion_routing_packet: onion_packet,
4296                         };
4297
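                             // Deliver the hand-built HTLC directly; nodes[1] should reject it since accepting it
                             // would dip the sender below its required channel reserve.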
4298                         let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
4299                         match err {
4300                                 HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
4301                         }
4302                 }
4303
4304                 // split the rest to test the holding cell
4305                 let recv_value_21 = recv_value_2/2;
4306                 let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
4307                 {
4308                         let stat = get_channel_value_stat!(nodes[0], chan_1.2);
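                             // After the HTLC already in flight plus the two planned sends (and their fees),
                             // nodes[0]'s balance should sit exactly at its channel reserve.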
4309                         assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
4310                 }
4311
4312                 // now see if they go through on both sides
4313                 let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
4314                 // but this one will get stuck in the holding cell
4315                 nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
4316                 check_added_monitors!(nodes[0], 0);
4317                 let events = nodes[0].node.get_and_clear_pending_events();
4318                 assert_eq!(events.len(), 0);
4319
4320                 // test with outbound holding cell amount > 0
4321                 {
4322                         let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
4323                         match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
4324                                 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
4325                                 _ => panic!("Unknown error variants"),
4326                         }
4327                 }
4328
4329                 let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
4330                 // this one will also get stuck in the holding cell
4331                 nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
4332                 check_added_monitors!(nodes[0], 0);
4333                 let events = nodes[0].node.get_and_clear_pending_events();
4334                 assert_eq!(events.len(), 0);
4335
4336                 // flush the pending htlc
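                     // Completing the commitment_signed/RAA dance for payment_event_1 lets nodes[0] free its
                     // holding cell: handling the RAA below yields commitment_update_2 carrying the two held HTLCs.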
4337                 let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
4338                 check_added_monitors!(nodes[1], 1);
4339
4340                 let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap();
4341                 check_added_monitors!(nodes[0], 1);
4342                 let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
4343                 assert!(bs_none.is_none());
4344                 check_added_monitors!(nodes[0], 1);
4345                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
4346                 check_added_monitors!(nodes[1], 1);
4347
4348                 expect_pending_htlcs_forwardable!(nodes[1]);
4349
4350                 let ref payment_event_11 = expect_forward!(nodes[1]);
4351                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
4352                 commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
4353
4354                 expect_pending_htlcs_forwardable!(nodes[2]);
4355                 expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
4356
4357                 // flush the htlcs in the holding cell
4358                 assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
4359                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
4360                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
4361                 commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
4362                 expect_pending_htlcs_forwardable!(nodes[1]);
4363
4364                 let ref payment_event_3 = expect_forward!(nodes[1]);
4365                 assert_eq!(payment_event_3.msgs.len(), 2);
4366                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
4367                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
4368
4369                 commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
4370                 expect_pending_htlcs_forwardable!(nodes[2]);
4371
4372                 let events = nodes[2].node.get_and_clear_pending_events();
4373                 assert_eq!(events.len(), 2);
4374                 match events[0] {
4375                         Event::PaymentReceived { ref payment_hash, amt } => {
4376                                 assert_eq!(our_payment_hash_21, *payment_hash);
4377                                 assert_eq!(recv_value_21, amt);
4378                         },
4379                         _ => panic!("Unexpected event"),
4380                 }
4381                 match events[1] {
4382                         Event::PaymentReceived { ref payment_hash, amt } => {
4383                                 assert_eq!(our_payment_hash_22, *payment_hash);
4384                                 assert_eq!(recv_value_22, amt);
4385                         },
4386                         _ => panic!("Unexpected event"),
4387                 }
4388
4389                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
4390                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
4391                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
4392
4393                 let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
4394                 let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
4395                 assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
4396                 assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
4397
4398                 let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
4399                 assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
4400         }
4401
4402         #[test]
4403         fn channel_monitor_network_test() {
4404                 // Simple test which builds a network of ChannelManagers, connects them to each other, and
4405                 // tests that ChannelMonitor is able to recover from various states.
4406                 let nodes = create_network(5);
4407
4408                 // Create some initial channels
4409                 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4410                 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4411                 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
4412                 let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
4413
4414                 // Rebalance the network a bit by relaying one payment through all the channels...
4415                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
4416                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
4417                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
4418                 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
4419
4420                 // Simple case with no pending HTLCs:
4421                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
4422                 {
4423                         let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
4424                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4425                         nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
4426                         test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
4427                 }
4428                 get_announce_close_broadcast_events(&nodes, 0, 1);
4429                 assert_eq!(nodes[0].node.list_channels().len(), 0);
4430                 assert_eq!(nodes[1].node.list_channels().len(), 1);
4431
4432                 // One pending HTLC is discarded by the force-close:
4433                 let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
4434
4435                 // Simple case of one pending HTLC to HTLC-Timeout
4436                 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
4437                 {
4438                         let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
4439                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4440                         nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
4441                         test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
4442                 }
4443                 get_announce_close_broadcast_events(&nodes, 1, 2);
4444                 assert_eq!(nodes[1].node.list_channels().len(), 0);
4445                 assert_eq!(nodes[2].node.list_channels().len(), 1);
4446
4447                 macro_rules! claim_funds {
4448                         ($node: expr, $prev_node: expr, $preimage: expr) => {
4449                                 {
4450                                         assert!($node.node.claim_funds($preimage));
4451                                         check_added_monitors!($node, 1);
4452
4453                                         let events = $node.node.get_and_clear_pending_events();
4454                                         assert_eq!(events.len(), 1);
4455                                         match events[0] {
4456                                                 Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
4457                                                         assert!(update_add_htlcs.is_empty());
4458                                                         assert!(update_fail_htlcs.is_empty());
4459                                                         assert_eq!(*node_id, $prev_node.node.get_our_node_id());
4460                                                 },
4461                                                 _ => panic!("Unexpected event"),
4462                                         };
4463                                 }
4464                         }
4465                 }
4466
4467                 // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
4468                 // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
4469                 nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
4470                 {
4471                         let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
4472
4473                         // Claim the payment on nodes[3], giving it knowledge of the preimage
4474                         claim_funds!(nodes[3], nodes[2], payment_preimage_1);
4475
4476                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4477                         nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
4478
4479                         check_preimage_claim(&nodes[3], &node_txn);
4480                 }
4481                 get_announce_close_broadcast_events(&nodes, 2, 3);
4482                 assert_eq!(nodes[2].node.list_channels().len(), 0);
4483                 assert_eq!(nodes[3].node.list_channels().len(), 1);
4484
4485                 { // Cheat and reset nodes[4]'s height to 1
4486                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4487                         nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
4488                 }
4489
4490                 assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
4491                 assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
4492                 // One pending HTLC to time out:
4493                 let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
4494                 // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
4495                 // buffer space).
4496
4497                 {
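                             // Connect empty blocks until past the HTLC's CLTV expiry (plus the fail-timeout buffer);
                             // nodes[3] should then broadcast its commitment tx along with an HTLC-Timeout tx.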
4498                         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4499                         nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
4500                         for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
4501                                 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4502                                 nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
4503                         }
4504
4505                         let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
4506
4507                         // Claim the payment on nodes[4], giving it knowledge of the preimage
4508                         claim_funds!(nodes[4], nodes[3], payment_preimage_2);
4509
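                             // nodes[4] now knows the preimage; connect blocks until within CLTV_CLAIM_BUFFER of the
                             // expiry so it broadcasts its commitment tx and an HTLC-Success claim.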
4510                         header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4511                         nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
4512                         for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
4513                                 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4514                                 nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
4515                         }
4516
4517                         test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
4518
4519                         header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4520                         nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
4521
4522                         check_preimage_claim(&nodes[4], &node_txn);
4523                 }
4524                 get_announce_close_broadcast_events(&nodes, 3, 4);
4525                 assert_eq!(nodes[3].node.list_channels().len(), 0);
4526                 assert_eq!(nodes[4].node.list_channels().len(), 0);
4527
4528                 // Create some new channels:
4529                 let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
4530
4531                 // A pending HTLC which will be revoked:
4532                 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4533                 // Get the will-be-revoked local txn from nodes[0]
4534                 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
4535                 assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
4536                 assert_eq!(revoked_local_txn[0].input.len(), 1);
4537                 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
4538                 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only the HTLC output and the output back to nodes[0] are present
4539                 assert_eq!(revoked_local_txn[1].input.len(), 1);
4540                 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
4541                 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
4542                 // Revoke the old state
4543                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
4544
4545                 {
4546                         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4547                         nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4548                         {
4549                                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4550                                 assert_eq!(node_txn.len(), 3);
4551                                 assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
4552                                 assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
4553
4554                                 check_spends!(node_txn[0], revoked_local_txn[0].clone());
4555                                 node_txn.swap_remove(0);
4556                         }
4557                         test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
4558
4559                         nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4560                         let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
4561                         header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4562                         nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
4563                         test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
4564                 }
4565                 get_announce_close_broadcast_events(&nodes, 0, 1);
4566                 assert_eq!(nodes[0].node.list_channels().len(), 0);
4567                 assert_eq!(nodes[1].node.list_channels().len(), 0);
4568         }
4569
4570         #[test]
4571         fn revoked_output_claim() {
4572                 // Simple test to ensure a node will claim a revoked output when a stale remote commitment
4573                 // transaction is broadcast by its counterparty
4574                 let nodes = create_network(2);
4575                 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4576                 // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
4577                 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
4578                 assert_eq!(revoked_local_txn.len(), 1);
4579                 // Only output is the full channel value back to nodes[0]:
4580                 assert_eq!(revoked_local_txn[0].output.len(), 1);
4581                 // Send a payment through, updating everyone's latest commitment txn
4582                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
4583
4584                 // Inform nodes[1] that nodes[0] broadcast a stale tx
4585                 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4586                 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4587                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4588                 assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast justice tx twice, and its own local state once
4589
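                // The justice tx appears twice because registering the revoked outputs with the monitor triggers a
                // re-scan of the connected block.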
4590                 assert_eq!(node_txn[0], node_txn[2]);
4591
4592                 check_spends!(node_txn[0], revoked_local_txn[0].clone());
4593                 check_spends!(node_txn[1], chan_1.3.clone());
4594
4595                 // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
4596                 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4597                 get_announce_close_broadcast_events(&nodes, 0, 1);
4598         }
4599
4600         #[test]
4601         fn claim_htlc_outputs_shared_tx() {
4602                 // The node revoked an old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
4603                 let nodes = create_network(2);
4604
4605                 // Create some new channel:
4606                 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4607
4608                 // Rebalance the network to generate HTLCs in both directions
4609                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4610                 // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
4611                 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4612                 let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
4613
4614                 // Get the will-be-revoked local txn from node[0]
4615                 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
4616                 assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
4617                 assert_eq!(revoked_local_txn[0].input.len(), 1);
4618                 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4619                 assert_eq!(revoked_local_txn[1].input.len(), 1);
4620                 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
4621                 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
4622                 check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
4623
4624                 //Revoke the old state
4625                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
4626
4627                 {
4628                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4629
4630                         nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4631
4632                         nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4633                         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4634                         assert_eq!(node_txn.len(), 4);
4635
4636                         assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
4637                         check_spends!(node_txn[0], revoked_local_txn[0].clone());
4638
4639                         assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
4640
4641                         let mut witness_lens = BTreeSet::new();
4642                         witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
4643                         witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
4644                         witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
4645                         assert_eq!(witness_lens.len(), 3);
4646                         assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
4647                         assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
4648                         assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
4649
4650                         // Next nodes[1] broadcasts its current local tx state:
4651                         assert_eq!(node_txn[1].input.len(), 1);
4652                         assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spends the funding tx's unique output; tx broadcast by ChannelManager
4653
4654                         assert_eq!(node_txn[2].input.len(), 1);
4655                         let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
4656                         assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
4657                         assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
4658                         assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
4659                         assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
4660                 }
4661                 get_announce_close_broadcast_events(&nodes, 0, 1);
4662                 assert_eq!(nodes[0].node.list_channels().len(), 0);
4663                 assert_eq!(nodes[1].node.list_channels().len(), 0);
4664         }
4665
4666         #[test]
4667         fn claim_htlc_outputs_single_tx() {
4668                 // The node revoked an old state; the HTLCs have timed out, so claim each of them in a separate justice tx
4669                 let nodes = create_network(2);
4670
4671                 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4672
4673                 // Rebalance the network to generate HTLCs in both directions
4674                 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4675                 // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
4676                 // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
4677                 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4678                 let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
4679
4680                 // Get the will-be-revoked local txn from node[0]
4681                 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
4682
4683                 //Revoke the old state
4684                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
4685
4686                 {
4687                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4688
4689                         nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
4690
4691                         nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
4692                         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4693                         assert_eq!(node_txn.len(), 12); // ChannelManager: 2, ChannelMonitor: 10 = (1 revoked to_local claim + 2 revoked HTLC claims + 1 local commitment tx + 1 HTLC-Timeout tx) * 2 (block re-scan)
4694
4695                         assert_eq!(node_txn[0], node_txn[7]);
4696                         assert_eq!(node_txn[1], node_txn[8]);
4697                         assert_eq!(node_txn[2], node_txn[9]);
4698                         assert_eq!(node_txn[3], node_txn[10]);
4699                         assert_eq!(node_txn[4], node_txn[11]);
4700                         assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + HTLC-Timeout tx broadcast by ChannelManager
4701                         assert_eq!(node_txn[4], node_txn[6]);
4702
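                             // Each of the three single-input justice transactions spends a different output of the
                             // revoked commitment tx; verify each against it below.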
4703                         assert_eq!(node_txn[0].input.len(), 1);
4704                         assert_eq!(node_txn[1].input.len(), 1);
4705                         assert_eq!(node_txn[2].input.len(), 1);
4706
4707                         let mut revoked_tx_map = HashMap::new();
4708                         revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
4709                         node_txn[0].verify(&revoked_tx_map).unwrap();
4710                         node_txn[1].verify(&revoked_tx_map).unwrap();
4711                         node_txn[2].verify(&revoked_tx_map).unwrap();
4712
4713                         let mut witness_lens = BTreeSet::new();
4714                         witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
4715                         witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
4716                         witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
4717                         assert_eq!(witness_lens.len(), 3);
4718                         assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
4719                         assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
4720                         assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
4721
4722                         assert_eq!(node_txn[3].input.len(), 1);
4723                         check_spends!(node_txn[3], chan_1.3.clone());
4724
4725                         assert_eq!(node_txn[4].input.len(), 1);
4726                         let witness_script = node_txn[4].input[0].witness.last().unwrap();
4727                         assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
4728                         assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
4729                         assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
4730                         assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
4731                 }
4732                 get_announce_close_broadcast_events(&nodes, 0, 1);
4733                 assert_eq!(nodes[0].node.list_channels().len(), 0);
4734                 assert_eq!(nodes[1].node.list_channels().len(), 0);
4735         }
4736
4737         #[test]
4738         fn test_htlc_ignore_latest_remote_commitment() {
4739                 // Test that HTLC transactions spending the latest remote commitment transaction are simply
4740                 // ignored if we cannot claim them. This originally tickled an invalid unwrap().
4741                 let nodes = create_network(2);
4742                 create_announced_chan_between_nodes(&nodes, 0, 1);
4743
4744                 route_payment(&nodes[0], &[&nodes[1]], 10000000);
4745                 nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
4746                 {
4747                         let events = nodes[0].node.get_and_clear_pending_events();
4748                         assert_eq!(events.len(), 1);
4749                         match events[0] {
4750                                 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4751                                         assert_eq!(flags & 0b10, 0b10);
4752                                 },
4753                                 _ => panic!("Unexpected event"),
4754                         }
4755                 }
4756
4757                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
4758                 assert_eq!(node_txn.len(), 2);
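                // These should be nodes[0]'s latest local commitment tx and, since an HTLC is still pending,
                // the corresponding HTLC-Timeout tx.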
4759
4760                 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4761                 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
4762
4763                 {
4764                         let events = nodes[1].node.get_and_clear_pending_events();
4765                         assert_eq!(events.len(), 1);
4766                         match events[0] {
4767                                 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4768                                         assert_eq!(flags & 0b10, 0b10);
4769                                 },
4770                                 _ => panic!("Unexpected event"),
4771                         }
4772                 }
4773
4774                 // Duplicate the block_connected call since this may happen due to other listeners
4775                 // registering new transactions
4776                 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
4777         }
4778
4779         #[test]
4780         fn test_force_close_fail_back() {
4781                 // Check which HTLCs are failed-backwards on channel force-closure
4782                 let mut nodes = create_network(3);
4783                 create_announced_chan_between_nodes(&nodes, 0, 1);
4784                 create_announced_chan_between_nodes(&nodes, 1, 2);
4785
4786                 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
4787
4788                 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4789
4790                 let mut payment_event = {
4791                         nodes[0].node.send_payment(route, our_payment_hash).unwrap();
4792                         check_added_monitors!(nodes[0], 1);
4793
4794                         let mut events = nodes[0].node.get_and_clear_pending_events();
4795                         assert_eq!(events.len(), 1);
4796                         SendEvent::from_event(events.remove(0))
4797                 };
4798
4799                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4800                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4801
4802                 let events_1 = nodes[1].node.get_and_clear_pending_events();
4803                 assert_eq!(events_1.len(), 1);
4804                 match events_1[0] {
4805                         Event::PendingHTLCsForwardable { .. } => { },
4806                         _ => panic!("Unexpected event"),
4807                 };
4808
4809                 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
4810                 nodes[1].node.process_pending_htlc_forwards();
4811
4812                 let mut events_2 = nodes[1].node.get_and_clear_pending_events();
4813                 assert_eq!(events_2.len(), 1);
4814                 payment_event = SendEvent::from_event(events_2.remove(0));
4815                 assert_eq!(payment_event.msgs.len(), 1);
4816
4817                 check_added_monitors!(nodes[1], 1);
4818                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4819                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
4820                 check_added_monitors!(nodes[2], 1);
4821
4822                 // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
4823                 // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
4824                 // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
4825
4826                 nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
4827                 let events_3 = nodes[2].node.get_and_clear_pending_events();
4828                 assert_eq!(events_3.len(), 1);
4829                 match events_3[0] {
4830                         Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4831                                 assert_eq!(flags & 0b10, 0b10);
4832                         },
4833                         _ => panic!("Unexpected event"),
4834                 }
4835
4836                 let tx = {
4837                         let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
4838                         // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
4839                         // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will
4840                         // go back to nodes[1] upon timeout.
4841                         assert_eq!(node_txn.len(), 1);
4842                         node_txn.remove(0)
4843                 };
4844
4845                 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4846                 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
4847
4848                 let events_4 = nodes[1].node.get_and_clear_pending_events();
4849                 // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
4850                 assert_eq!(events_4.len(), 1);
4851                 match events_4[0] {
4852                         Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4853                                 assert_eq!(flags & 0b10, 0b10);
4854                         },
4855                         _ => panic!("Unexpected event"),
4856                 }
4857
4858                 // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
4859                 {
4860                         let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
4861                         monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
4862                                 .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
4863                 }
4864                 nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
4865                 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
4866                 assert_eq!(node_txn.len(), 1);
4867                 assert_eq!(node_txn[0].input.len(), 1);
4868                 assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
4869                 assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
4870                 assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
4871
4872                 check_spends!(node_txn[0], tx);
4873         }
4874
4875         #[test]
4876         fn test_unconf_chan() {
4877                 // After creating a channel between the nodes, we disconnect all blocks previously seen to force a channel close on nodes[0]'s side
4878                 let nodes = create_network(2);
4879                 create_announced_chan_between_nodes(&nodes, 0, 1);
4880
4881                 let channel_state = nodes[0].node.channel_state.lock().unwrap();
4882                 assert_eq!(channel_state.by_id.len(), 1);
4883                 assert_eq!(channel_state.short_to_id.len(), 1);
4884                 mem::drop(channel_state);
4885
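                // Rebuild the headers for the previously-connected blocks and disconnect them newest-first, so
                // nodes[0] no longer considers the funding tx confirmed and force-closes the channel.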
4886                 let mut headers = Vec::new();
4887                 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4888                 headers.push(header.clone());
4889                 for _i in 2..100 {
4890                         header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4891                         headers.push(header.clone());
4892                 }
4893                 while !headers.is_empty() {
4894                         nodes[0].node.block_disconnected(&headers.pop().unwrap());
4895                 }
4896                 {
4897                         let events = nodes[0].node.get_and_clear_pending_events();
4898                         assert_eq!(events.len(), 1);
4899                         match events[0] {
4900                                 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4901                                         assert_eq!(flags & 0b10, 0b10);
4902                                 },
4903                                 _ => panic!("Unexpected event"),
4904                         }
4905                 }
4906                 let channel_state = nodes[0].node.channel_state.lock().unwrap();
4907                 assert_eq!(channel_state.by_id.len(), 0);
4908                 assert_eq!(channel_state.short_to_id.len(), 0);
4909         }
4910
4911         /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
4912         /// for claims/fails they are separated out.
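	/// In each tuple the first entry describes messages node_a expects to (re-)receive from node_b
	/// and the second entry messages node_b expects from node_a. A -1 in pending_htlc_adds means
	/// the re-sent commitment_signed is a response and carries no update_add_htlcs, while the
	/// pending_raa flags mark which side still expects a revoke_and_ack.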
4913         fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
4914                 let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id());
4915                 let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id());
4916
4917                 let mut resp_1 = Vec::new();
4918                 for msg in reestablish_1 {
4919                         resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
4920                 }
4921                 if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
4922                         check_added_monitors!(node_b, 1);
4923                 } else {
4924                         check_added_monitors!(node_b, 0);
4925                 }
4926
4927                 let mut resp_2 = Vec::new();
4928                 for msg in reestablish_2 {
4929                         resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
4930                 }
4931                 if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
4932                         check_added_monitors!(node_a, 1);
4933                 } else {
4934                         check_added_monitors!(node_a, 0);
4935                 }
4936
4937		// We don't yet support both needing updates, as that would require a different commitment dance:
4938                 assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
4939                         (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
4940
4941                 for chan_msgs in resp_1.drain(..) {
4942                         if pre_all_htlcs {
4943                                 let a = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
4944                                 let _announcement_sigs_opt = a.unwrap();
4945                                 //TODO: Test announcement_sigs re-sending when we've implemented it
4946                         } else {
4947                                 assert!(chan_msgs.0.is_none());
4948                         }
4949                         if pending_raa.0 {
4950                                 assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
4951                                 check_added_monitors!(node_a, 1);
4952                         } else {
4953                                 assert!(chan_msgs.1.is_none());
4954                         }
4955                         if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
4956                                 let commitment_update = chan_msgs.2.unwrap();
4957                                 if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
4958                                         assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
4959                                 } else {
4960                                         assert!(commitment_update.update_add_htlcs.is_empty());
4961                                 }
4962                                 assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
4963                                 assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
4964                                 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
4965                                 for update_add in commitment_update.update_add_htlcs {
4966                                         node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
4967                                 }
4968                                 for update_fulfill in commitment_update.update_fulfill_htlcs {
4969                                         node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
4970                                 }
4971                                 for update_fail in commitment_update.update_fail_htlcs {
4972                                         node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
4973                                 }
4974
4975                                 if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
4976                                         commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
4977                                 } else {
4978                                         let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
4979                                         check_added_monitors!(node_a, 1);
4980                                         assert!(as_commitment_signed.is_none());
4981                                         assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
4982                                         check_added_monitors!(node_b, 1);
4983                                 }
4984                         } else {
4985                                 assert!(chan_msgs.2.is_none());
4986                         }
4987                 }
4988
4989                 for chan_msgs in resp_2.drain(..) {
4990                         if pre_all_htlcs {
4991                                 let _announcement_sigs_opt = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
4992                                 //TODO: Test announcement_sigs re-sending when we've implemented it
4993                         } else {
4994                                 assert!(chan_msgs.0.is_none());
4995                         }
4996                         if pending_raa.1 {
4997                                 assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
4998                                 check_added_monitors!(node_b, 1);
4999                         } else {
5000                                 assert!(chan_msgs.1.is_none());
5001                         }
5002                         if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
5003                                 let commitment_update = chan_msgs.2.unwrap();
5004                                 if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
5005                                         assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
5006                                 }
5007				assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
5008				assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
5009                                 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
5010                                 for update_add in commitment_update.update_add_htlcs {
5011                                         node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
5012                                 }
5013                                 for update_fulfill in commitment_update.update_fulfill_htlcs {
5014                                         node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
5015                                 }
5016                                 for update_fail in commitment_update.update_fail_htlcs {
5017                                         node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
5018                                 }
5019
5020                                 if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
5021                                         commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
5022                                 } else {
5023                                         let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
5024                                         check_added_monitors!(node_b, 1);
5025                                         assert!(bs_commitment_signed.is_none());
5026                                         assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
5027                                         check_added_monitors!(node_a, 1);
5028                                 }
5029                         } else {
5030                                 assert!(chan_msgs.2.is_none());
5031                         }
5032                 }
5033         }
5034
5035         #[test]
5036         fn test_simple_peer_disconnect() {
5037                 // Test that we can reconnect when there are no lost messages
5038                 let nodes = create_network(3);
5039                 create_announced_chan_between_nodes(&nodes, 0, 1);
5040                 create_announced_chan_between_nodes(&nodes, 1, 2);
5041
5042                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5043                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5044                 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5045
5046                 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
5047                 let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
5048                 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
5049                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
5050
5051                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5052                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5053                 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5054
5055                 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
5056                 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
5057                 let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
5058                 let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
5059
5060                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5061                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5062
5063                 claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
5064                 fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
5065
5066                 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
5067                 {
5068                         let events = nodes[0].node.get_and_clear_pending_events();
5069                         assert_eq!(events.len(), 2);
5070                         match events[0] {
5071                                 Event::PaymentSent { payment_preimage } => {
5072                                         assert_eq!(payment_preimage, payment_preimage_3);
5073                                 },
5074                                 _ => panic!("Unexpected event"),
5075                         }
5076                         match events[1] {
5077                                 Event::PaymentFailed { payment_hash, rejected_by_dest } => {
5078                                         assert_eq!(payment_hash, payment_hash_5);
5079                                         assert!(rejected_by_dest);
5080                                 },
5081                                 _ => panic!("Unexpected event"),
5082                         }
5083                 }
5084
5085                 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
5086                 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
5087         }
5088
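	/// messages_delivered encodes how far the initial payment exchange got before we disconnect:
	/// 0 - funding_locked never reached nodes[1] (and the HTLC add was dropped too),
	/// 1 - channel announced, but update_add_htlc/commitment_signed were dropped,
	/// 2 - nodes[1] received them, but its revoke_and_ack was dropped,
	/// 3 - nodes[0] received the revoke_and_ack, but not the following commitment_signed,
	/// 4 - nodes[0] received the commitment_signed, but its final revoke_and_ack was dropped,
	/// 5 - everything was delivered before the disconnect.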
5089         fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
5090                 // Test that we can reconnect when in-flight HTLC updates get dropped
5091                 let mut nodes = create_network(2);
5092                 if messages_delivered == 0 {
5093                         create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
5094                         // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
5095                 } else {
5096                         create_announced_chan_between_nodes(&nodes, 0, 1);
5097                 }
5098
5099                 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
5100                 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
5101
5102                 let payment_event = {
5103                         nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
5104                         check_added_monitors!(nodes[0], 1);
5105
5106                         let mut events = nodes[0].node.get_and_clear_pending_events();
5107                         assert_eq!(events.len(), 1);
5108                         SendEvent::from_event(events.remove(0))
5109                 };
5110                 assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
5111
5112                 if messages_delivered < 2 {
5113                         // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
5114                 } else {
5115                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
5116                         let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
5117                         check_added_monitors!(nodes[1], 1);
5118
5119                         if messages_delivered >= 3 {
5120                                 assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
5121                                 check_added_monitors!(nodes[0], 1);
5122
5123                                 if messages_delivered >= 4 {
5124                                         let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap();
5125                                         assert!(as_commitment_signed.is_none());
5126                                         check_added_monitors!(nodes[0], 1);
5127
5128                                         if messages_delivered >= 5 {
5129                                                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
5130                                                 check_added_monitors!(nodes[1], 1);
5131                                         }
5132                                 }
5133                         }
5134                 }
5135
5136                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5137                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5138                 if messages_delivered < 2 {
5139                         // Even if the funding_locked messages get exchanged, as long as nothing further was
5140                         // received on either side, both sides will need to resend them.
5141                         reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
5142                 } else if messages_delivered == 2 {
5143                         // nodes[0] still wants its RAA + commitment_signed
5144                         reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
5145                 } else if messages_delivered == 3 {
5146                         // nodes[0] still wants its commitment_signed
5147                         reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
5148                 } else if messages_delivered == 4 {
5149                         // nodes[1] still wants its final RAA
5150                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
5151                 } else if messages_delivered == 5 {
5152                         // Everything was delivered...
5153                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5154                 }
5155
5156                 let events_1 = nodes[1].node.get_and_clear_pending_events();
5157                 assert_eq!(events_1.len(), 1);
5158                 match events_1[0] {
5159                         Event::PendingHTLCsForwardable { .. } => { },
5160                         _ => panic!("Unexpected event"),
5161                 };
5162
5163                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5164                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5165                 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5166
5167                 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
5168                 nodes[1].node.process_pending_htlc_forwards();
5169
5170                 let events_2 = nodes[1].node.get_and_clear_pending_events();
5171                 assert_eq!(events_2.len(), 1);
5172                 match events_2[0] {
5173                         Event::PaymentReceived { ref payment_hash, amt } => {
5174                                 assert_eq!(payment_hash_1, *payment_hash);
5175                                 assert_eq!(amt, 1000000);
5176                         },
5177                         _ => panic!("Unexpected event"),
5178                 }
5179
5180                 nodes[1].node.claim_funds(payment_preimage_1);
5181                 check_added_monitors!(nodes[1], 1);
5182
5183                 let events_3 = nodes[1].node.get_and_clear_pending_events();
5184                 assert_eq!(events_3.len(), 1);
5185                 let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
5186                         Event::UpdateHTLCs { ref node_id, ref updates } => {
5187                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
5188                                 assert!(updates.update_add_htlcs.is_empty());
5189                                 assert!(updates.update_fail_htlcs.is_empty());
5190                                 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5191                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
5192                                 assert!(updates.update_fee.is_none());
5193                                 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
5194                         },
5195                         _ => panic!("Unexpected event"),
5196                 };
5197
5198                 if messages_delivered >= 1 {
5199                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
5200
5201                         let events_4 = nodes[0].node.get_and_clear_pending_events();
5202                         assert_eq!(events_4.len(), 1);
5203                         match events_4[0] {
5204                                 Event::PaymentSent { ref payment_preimage } => {
5205                                         assert_eq!(payment_preimage_1, *payment_preimage);
5206                                 },
5207                                 _ => panic!("Unexpected event"),
5208                         }
5209
5210                         if messages_delivered >= 2 {
5211                                 let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
5212                                 check_added_monitors!(nodes[0], 1);
5213
5214                                 if messages_delivered >= 3 {
5215                                         assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
5216                                         check_added_monitors!(nodes[1], 1);
5217
5218                                         if messages_delivered >= 4 {
5219                                                 let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
5220                                                 assert!(bs_commitment_signed.is_none());
5221                                                 check_added_monitors!(nodes[1], 1);
5222
5223                                                 if messages_delivered >= 5 {
5224                                                         assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
5225                                                         check_added_monitors!(nodes[0], 1);
5226                                                 }
5227                                         }
5228                                 }
5229                         }
5230                 }
5231
5232                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5233                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5234                 if messages_delivered < 2 {
5235                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
5236                         //TODO: Deduplicate PaymentSent events, then enable this if:
5237                         //if messages_delivered < 1 {
5238                                 let events_4 = nodes[0].node.get_and_clear_pending_events();
5239                                 assert_eq!(events_4.len(), 1);
5240                                 match events_4[0] {
5241                                         Event::PaymentSent { ref payment_preimage } => {
5242                                                 assert_eq!(payment_preimage_1, *payment_preimage);
5243                                         },
5244                                         _ => panic!("Unexpected event"),
5245                                 }
5246                         //}
5247                 } else if messages_delivered == 2 {
5248                         // nodes[0] still wants its RAA + commitment_signed
5249                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
5250                 } else if messages_delivered == 3 {
5251                         // nodes[0] still wants its commitment_signed
5252                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
5253                 } else if messages_delivered == 4 {
5254                         // nodes[1] still wants its final RAA
5255                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
5256                 } else if messages_delivered == 5 {
5257                         // Everything was delivered...
5258                         reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5259                 }
5260
5261                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5262                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5263                 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5264
5265                 // Channel should still work fine...
5266                 let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
5267                 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
5268         }
5269
5270         #[test]
5271         fn test_drop_messages_peer_disconnect_a() {
5272                 do_test_drop_messages_peer_disconnect(0);
5273                 do_test_drop_messages_peer_disconnect(1);
5274                 do_test_drop_messages_peer_disconnect(2);
5275         }
5276
5277         #[test]
5278         fn test_drop_messages_peer_disconnect_b() {
5279                 do_test_drop_messages_peer_disconnect(3);
5280                 do_test_drop_messages_peer_disconnect(4);
5281                 do_test_drop_messages_peer_disconnect(5);
5282         }
5283
5284         #[test]
5285         fn test_funding_peer_disconnect() {
5286                 // Test that we can lock in our funding tx while disconnected
5287                 let nodes = create_network(2);
5288                 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
5289
5290                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5291                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5292
5293                 confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
5294                 let events_1 = nodes[0].node.get_and_clear_pending_events();
5295                 assert_eq!(events_1.len(), 1);
5296                 match events_1[0] {
5297                         Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
5298                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5299                                 assert!(announcement_sigs.is_none());
5300                         },
5301                         _ => panic!("Unexpected event"),
5302                 }
5303
5304                 confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
5305                 let events_2 = nodes[1].node.get_and_clear_pending_events();
5306                 assert_eq!(events_2.len(), 1);
5307                 match events_2[0] {
5308                         Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
5309                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
5310                                 assert!(announcement_sigs.is_none());
5311                         },
5312                         _ => panic!("Unexpected event"),
5313                 }
5314
5315                 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5316                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
5317                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
5318                 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
5319
5320		// TODO: We shouldn't need to manually pass list_usable_channels here once we support
5321                 // rebroadcasting announcement_signatures upon reconnect.
5322
5323                 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
5324                 let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
5325                 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
5326         }
5327
5328         #[test]
5329         fn test_invalid_channel_announcement() {
5330		//Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
5331                 let secp_ctx = Secp256k1::new();
5332                 let nodes = create_network(2);
5333
5334                 let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
5335
5336                 let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
5337                 let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
5338                 let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
5339                 let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
5340
5341                 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
5342
5343                 let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
5344                 let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
5345
5346                 let as_network_key = nodes[0].node.get_our_node_id();
5347                 let bs_network_key = nodes[1].node.get_our_node_id();
5348
5349                 let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
5350
5351                 let mut chan_announcement;
5352
5353                 macro_rules! dummy_unsigned_msg {
5354                         () => {
5355                                 msgs::UnsignedChannelAnnouncement {
5356                                         features: msgs::GlobalFeatures::new(),
5357                                         chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
5358                                         short_channel_id: as_chan.get_short_channel_id().unwrap(),
5359                                         node_id_1: if were_node_one { as_network_key } else { bs_network_key },
5360                                         node_id_2: if were_node_one { bs_network_key } else { as_network_key },
5361                                         bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
5362                                         bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
5363                                         excess_data: Vec::new(),
5364				}
5365                         }
5366                 }
5367
5368                 macro_rules! sign_msg {
5369                         ($unsigned_msg: expr) => {
5370                                 let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
5371                                 let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
5372                                 let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
5373                                 let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key);
5374                                 let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key);
5375                                 chan_announcement = msgs::ChannelAnnouncement {
5376                                         node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
5377                                         node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
5378                                         bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
5379                                         bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
5380                                         contents: $unsigned_msg
5381                                 }
5382                         }
5383                 }
5384
5385                 let unsigned_msg = dummy_unsigned_msg!();
5386                 sign_msg!(unsigned_msg);
5387                 assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
5388                 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
5389
5390                 // Configured with Network::Testnet
5391                 let mut unsigned_msg = dummy_unsigned_msg!();
5392                 unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
5393                 sign_msg!(unsigned_msg);
5394                 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
5395
5396                 let mut unsigned_msg = dummy_unsigned_msg!();
5397                 unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
5398                 sign_msg!(unsigned_msg);
5399                 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
5400         }
5401
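	/// Sends a payment along the 0 -> 1 -> 2 route and lets the callbacks corrupt the
	/// update_add_htlc before it is processed: callback1 mutates the message nodes[1] receives
	/// (test_case 0, an intermediate-node failure) and callback2 the one nodes[2] receives
	/// (test_case 1, a final-node failure). The resulting failure is relayed back to nodes[0] and
	/// the PaymentFailed event is checked; expected_error_code and expected_channel_update are not
	/// yet asserted on (see the TODOs below).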
5402	fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, channels: &[(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction)], route: &Route, payment_hash: &[u8; 32], mut callback1: F1, mut callback2: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<msgs::HTLCFailChannelUpdate>)
5403                 where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
5404                                         F2: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
5405         {
5406                 use ln::msgs::HTLCFailChannelUpdate;
5407
5408                 // reset block height
5409                 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5410                 for ix in 0..nodes.len() {
5411                         nodes[ix].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
5412                 }
5413
5414                 macro_rules! expect_update_htlc_event {
5415                         ($node: expr) => {{
5416                                 let events = $node.node.get_and_clear_pending_events();
5417                                 assert_eq!(events.len(), 1);
5418                                 match events[0] {
5419					Event::UpdateHTLCs { .. } => { },
5420                                         _ => panic!("Unexpected event"),
5421                                 };
5422                                 $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
5423                                 $node.node.process_pending_htlc_forwards();
5424                         }}
5425                 }
5426
5427                 macro_rules! expect_pending_htlcs_forwardable {
5428                         ($node: expr) => {{
5429                                 let events = $node.node.get_and_clear_pending_events();
5430                                 assert_eq!(events.len(), 1);
5431                                 match events[0] {
5432                                         Event::PendingHTLCsForwardable { .. } => { },
5433                                         _ => panic!("Unexpected event"),
5434                                 };
5435                                 $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
5436                                 $node.node.process_pending_htlc_forwards();
5437                         }}
5438                 };
5439
5453                 macro_rules! expect_forward_event {
5454                         ($node: expr) => {{
5455                                 let mut events = $node.node.get_and_clear_pending_events();
5456                                 assert_eq!(events.len(), 1);
5457                                 check_added_monitors!($node, 1);
5458                                 SendEvent::from_event(events.remove(0))
5459                         }}
5460                 };
5470
5471                 macro_rules! expect_fail_backward_event {
5472                         ($node: expr) => {{
5473                                 let events = $node.node.get_and_clear_pending_events();
5474                                 assert_eq!(events.len(), 1);
5475                                 match events[0] {
5476                                         Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, ref update_fail_htlcs, update_fail_malformed_htlcs:_, update_fee:_, ref commitment_signed } } => {
5477                                                 assert_eq!(update_fail_htlcs.len(),1);
5478                                                 (update_fail_htlcs[0].clone(), commitment_signed.clone())
5479                                         },
5480                                         _ => panic!("Unexpected event type!"),
5481                                 }
5482                         }}
5483                 };
5484
5485                 nodes[0].node.send_payment(route.clone(), payment_hash.clone()).unwrap();
5486                 let payment_event = expect_forward_event!(nodes[0]);
5487                 let mut update_add_1 = payment_event.msgs[0].clone();
5488
5489                 callback1(&mut update_add_1);
5490
5491                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_1).unwrap();
5492
5493                 // 0 => 1
5494                 let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
5495                 check_added_monitors!(nodes[1], 1);
5496                 assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
5497                 check_added_monitors!(nodes[0], 1);
5498                 let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
5499                 assert!(bs_none.is_none());
5500                 check_added_monitors!(nodes[0], 1);
5501                 let commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
5502                 check_added_monitors!(nodes[1], 1);
5503
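		// Depending on test_case, nodes[1] either fails the HTLC straight back to nodes[0]
		// (intermediate-node failure) or forwards it to nodes[2] and relays nodes[2]'s failure
		// back (final-node failure).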
5504                 let (update_fail_htlc, commitment_signed) = match test_case {
5505			0 => { // intermediate node failure
5506                                 assert!(commitment_update.is_some());
5507                                 let commitment_update = commitment_update.unwrap();
5508                                 assert!(commitment_update.update_fail_htlcs.len() == 1);
5509                                 (commitment_update.update_fail_htlcs[0].clone(), commitment_update.commitment_signed)
5510                         },
5511                         1 => { // final node failure
5512                                 assert!(commitment_update.is_none());
5513                                 expect_pending_htlcs_forwardable!(&nodes[1]);
5514
5515                                 let ref payment_event = expect_forward_event!(nodes[1]);
5516                                 let mut update_add_2 = payment_event.msgs[0].clone();
5517
5518                                 callback2(&mut update_add_2);
5519
5520                                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_2).unwrap();
5521                                 let (as_revoke_and_ack, as_commitment_signed) = nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
5522                                 check_added_monitors!(nodes[2], 1);
5523                                 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
5524
5525                                 check_added_monitors!(nodes[1], 1);
5526                                 let (bs_revoke_and_ack, bs_none) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
5527                                 assert!(bs_none.is_none());
5528                                 check_added_monitors!(nodes[1], 1);
5529                                 let commitment_update = nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap();
5530                                 check_added_monitors!(nodes[2], 1);
5531
5532                                 assert!(commitment_update.update_fail_htlcs.len() == 1);
5533                                 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &commitment_update.update_fail_htlcs[0]).unwrap();
5534                                 commitment_signed_dance!(nodes[1], nodes[2], commitment_update.commitment_signed, true);
5535
5536                                 let (update_fail_htlc_msg, commitment_signed) = expect_fail_backward_event!(nodes[1]);
5537                                 (update_fail_htlc_msg, commitment_signed)
5538                         },
5539                         _ => unreachable!(),
5540                 };
5541
5542                 // origin node fail handling
5543                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc).unwrap();
5544                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
5545
5546                 let events = nodes[0].node.get_and_clear_pending_events();
5547                 //TODO assert_eq!(events.len(), 2);
5548                 for evt in events.iter() {
5549                         match evt {
5550                                 &Event::PaymentFailed { payment_hash:_, ref rejected_by_dest} => {
5551                                         assert_eq!(*rejected_by_dest, !expected_retryable);
5552                                 },
5553                                 &Event::RouteUpdate { ref update } => {
5554                                         /* TODO check expected channel update */
5555                                         /*
5556                                         expected_channel_update.unwrap()
5557                                         assert!(expected_channel_update.is_some());
5558                                         if let Some(expected_update) = expected_channel_update {
5559                                                 assert!(fail_channel_update.is_some());
5560                                                 match fail_channel_update.unwrap() {
5561                                                         expected_update => {},
5562                                                         _ => panic!("Unexpected channel update"),
5563                                                 }
5564                                         }
5565                                         */
5566                                 },
5567                                 _ => panic!("Unexpected event"),
5568                         };
5569                 }
5570         }
5571
5572         #[test]
5573         fn test_onion_failure() {
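		// Exercise BOLT 4 onion failure handling over a 0 -> 1 -> 2 route by corrupting the HTLC
		// at either the intermediate or the final hop and verifying how the failure propagates
		// back to the origin node. Cases still marked TODO are not yet covered.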
5574                 use ln::msgs::ChannelUpdate;
5575                 use ln::channelmanager::CLTV_FAR_FAR_AWAY;
5576                 use bitcoin::blockdata::transaction::Transaction;
5577
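		// Failure-code flag bits from BOLT 4: BADONION for unparseable onions, PERM for permanent
		// failures, NODE for node-level failures, and UPDATE for failures that include a channel_update.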
5578                 const BADONION: u16 = 0x8000;
5579                 const PERM: u16 = 0x4000;
5580		const NODE: u16 = 0x2000;
5581                 const UPDATE: u16 = 0x1000;
5582
5583                 let mut nodes = create_network(3);
5584                 let channels = [create_announced_chan_between_nodes(&nodes, 0, 1), create_announced_chan_between_nodes(&nodes, 1, 2)];
5585                 let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
5586                 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap();
5588                 run_onion_failure_test("invalid_realm", 0, &nodes, &channels, &route, &payment_hash, |msg| {
5589                         let session_priv = SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[3; 32]).unwrap();
5590                         let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
5591                         let onion_keys = ChannelManager::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
5592                         let (onion_payloads, _htlc_msat, _htlc_cltv) = ChannelManager::build_onion_payloads_with_realm(&route, cur_height, 100).unwrap();
5593                         let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
5594                         msg.onion_routing_packet = onion_packet;
5595                 }, |_msg| {}, true, Some(PERM|1), None);
5596
5597                 //TODO temporary_node_failure(NODE|2)
5598                 //TODO permanent_node_failure(PERM|NODE|2)
5599                 //TODO required_node_feature_missing
5600                 run_onion_failure_test("invalid_onion_version", 0, &nodes, &channels, &route, &payment_hash, |msg| { msg.onion_routing_packet.version = 1; }, |_msg| {}, true, Some(BADONION|PERM|4), None);
5601                 run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &channels, &route, &payment_hash, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, |_msg| {}, true, Some(BADONION|PERM|5), None);
5602                 //TODO invalid_onion_key
5603                 //TODO temporary_channel_failure(UPDATE|7)
5604                 //TODO permanent_channel_failure(PERM|8)
5605                 //TODO required_channel_feature_missing(PERM|9)
5606                 //TODO unknown_next_peer(PERM|10)
5607                 //TODO amount_below_minimum(UPDATE|11)
5608                 //TODO fee_insufficient(UPDATE|12)
5609                 run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &channels, &route, &payment_hash, |msg| {
5610                         // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value
5611                         msg.cltv_expiry -= 1;
5612                 }, |_msg| {}, true, Some(UPDATE|13), None);
5613                 run_onion_failure_test("expiry_too_soon", 0, &nodes, &channels, &route, &payment_hash, |msg| {
5614                         let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1;
5615                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5616                         nodes[1].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
5617                 }, |_msg| {}, true, Some(UPDATE|14), None);
5618                 // TODO: unknown_payment_hash (PERM|15)
5619                 // TODO: unknown_payment_amount (PERM|15)
5620                 run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &channels, &route, &payment_hash, |_msg| {}, |msg| {
5621                         let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1;
5622                         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5623                         nodes[2].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
5624                 }, true, Some(17), None);
5625                 run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &channels, &route, &payment_hash, |_msg| {}, |msg| { msg.cltv_expiry += 1; }, true, Some(18), None);
5626                 /* TODO this raise 'Invalid commitment tx signature from peer'
5627                 run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &channels, &route, &payment_hash, |_msg| {}, |msg| {
5628                         // violate amt_to_forward > msg.amount_msat
5629                         msg.amount_msat -= 1;
5630                 }, true, Some(19), None);
5631                 */
5632                 // TODO: channel_disabled (UPDATE|20)
5633                 run_onion_failure_test("expiry_too_far", 0, &nodes, &channels, &route, &payment_hash, |msg| {
5634                         let session_priv = SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[3; 32]).unwrap();
5635                         let mut route = route.clone();
5636                         let height = 1;
5637                         route.hops[1].cltv_expiry_delta += CLTV_FAR_FAR_AWAY + route.hops[0].cltv_expiry_delta + 1;
5638                         let onion_keys = ChannelManager::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
5639                         let (onion_payloads, _htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, height).unwrap();
5640                         let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
5641                         msg.cltv_expiry = htlc_cltv;
5642                         msg.onion_routing_packet = onion_packet;
5643                 }, |_msg| {}, true, Some(21), None);
5644         }
5645 }