1 //! The top-level channel management and payment tracking stuff lives here.
3 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
4 //! responsible for tracking which channels are open, which HTLCs are in flight, and for
5 //! reestablishing those channels upon reconnect to the relevant peer(s).
7 //! It does not manage routing logic (see ln::router for that) nor does it manage constructing
8 //! on-chain transactions (it only monitors the chain to watch for any force-closes that might
9 //! imply it needs to fail HTLCs/payments/channels it manages).
11 use bitcoin::blockdata::block::BlockHeader;
12 use bitcoin::blockdata::transaction::Transaction;
13 use bitcoin::blockdata::constants::genesis_block;
14 use bitcoin::network::constants::Network;
15 use bitcoin::network::serialize::BitcoinHash;
16 use bitcoin::util::hash::Sha256dHash;
18 use secp256k1::key::{SecretKey,PublicKey};
19 use secp256k1::{Secp256k1,Message};
20 use secp256k1::ecdh::SharedSecret;
23 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
24 use chain::transaction::OutPoint;
25 use ln::channel::{Channel, ChannelError};
26 use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
27 use ln::router::{Route,RouteHop};
29 use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
30 use chain::keysinterface::KeysInterface;
31 use util::config::UserConfig;
32 use util::{byte_utils, events, internal_traits, rng};
33 use util::sha2::Sha256;
34 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
35 use util::chacha20poly1305rfc::ChaCha20;
36 use util::logger::Logger;
37 use util::errors::APIError;
40 use crypto::mac::{Mac,MacResult};
41 use crypto::hmac::Hmac;
42 use crypto::digest::Digest;
43 use crypto::symmetriccipher::SynchronousStreamCipher;
45 use std::{cmp, ptr, mem};
46 use std::collections::{HashMap, hash_map, HashSet};
48 use std::sync::{Arc, Mutex, MutexGuard, RwLock};
49 use std::sync::atomic::{AtomicUsize, Ordering};
50 use std::time::{Instant,Duration};
52 /// We hold various information about HTLC relay in the HTLC objects in Channel itself:
54 /// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
55 /// forward the HTLC with information it will give back to us when it does so, or if it should Fail
56 /// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
58 /// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
59 /// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
60 /// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
61 /// the HTLC backwards along the relevant path).
62 /// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
63 /// our payment, which we can use to decode errors or inform the user that the payment was sent.
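/// For illustration only - a minimal sketch of how these types fit together (`classify_htlc` is a
/// placeholder, not an item in this module):
/// ```ignore
/// // On update_add_htlc we decide, per inbound HTLC, whether it can be relayed:
/// let status = match classify_htlc(&update_add_htlc_msg) {
///     // Relayable: stash the forwarding details until the next forwarding pass.
///     Ok(info) => PendingHTLCStatus::Forward(info),
///     // Not relayable: hand the Channel a failure message for the remote peer.
///     Err(failure_msg) => PendingHTLCStatus::Fail(failure_msg),
/// };
/// ```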
64 mod channel_held_info {
66 use ln::router::Route;
67 use secp256k1::key::SecretKey;
69 /// Stores the info we will need to send when we want to forward an HTLC onwards
70 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
71 pub struct PendingForwardHTLCInfo {
72 pub(super) onion_packet: Option<msgs::OnionPacket>,
73 pub(super) incoming_shared_secret: [u8; 32],
74 pub(super) payment_hash: [u8; 32],
75 pub(super) short_channel_id: u64,
76 pub(super) amt_to_forward: u64,
77 pub(super) outgoing_cltv_value: u32,
80 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
81 pub enum HTLCFailureMsg {
82 Relay(msgs::UpdateFailHTLC),
83 Malformed(msgs::UpdateFailMalformedHTLC),
86 /// Stores either the forwarding info for an HTLC we should relay onwards, or the failure message to send back if we can't forward it
87 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
88 pub enum PendingHTLCStatus {
89 Forward(PendingForwardHTLCInfo),
93 /// Details of the inbound HTLC (from a previous hop) which corresponds to an outbound HTLC we forward
95 pub struct HTLCPreviousHopData {
96 pub(super) short_channel_id: u64,
97 pub(super) htlc_id: u64,
98 pub(super) incoming_packet_shared_secret: [u8; 32],
101 /// Tracks the source of an outbound HTLC - either an inbound HTLC we are forwarding or a payment we originated
103 pub enum HTLCSource {
104 PreviousHopData(HTLCPreviousHopData),
107 session_priv: SecretKey,
108 /// Technically we can recalculate this from the route, but we cache it here to avoid
109 /// doing a double-pass over the route when we get a failure back
110 first_hop_htlc_msat: u64,
115 pub fn dummy() -> Self {
116 HTLCSource::OutboundRoute {
117 route: Route { hops: Vec::new() },
118 session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
119 first_hop_htlc_msat: 0,
124 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
125 pub(crate) enum HTLCFailReason {
127 err: msgs::OnionErrorPacket,
135 pub(super) use self::channel_held_info::*;
137 struct MsgHandleErrInternal {
138 err: msgs::HandleError,
139 needs_channel_force_close: bool,
141 impl MsgHandleErrInternal {
143 fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
147 action: Some(msgs::ErrorAction::SendErrorMessage {
148 msg: msgs::ErrorMessage {
150 data: err.to_string()
154 needs_channel_force_close: false,
158 fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
162 action: Some(msgs::ErrorAction::SendErrorMessage {
163 msg: msgs::ErrorMessage {
165 data: err.to_string()
169 needs_channel_force_close: true,
173 fn from_maybe_close(err: msgs::HandleError) -> Self {
174 Self { err, needs_channel_force_close: true }
177 fn from_no_close(err: msgs::HandleError) -> Self {
178 Self { err, needs_channel_force_close: false }
181 fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
184 ChannelError::Ignore(msg) => HandleError {
186 action: Some(msgs::ErrorAction::IgnoreError),
188 ChannelError::Close(msg) => HandleError {
190 action: Some(msgs::ErrorAction::SendErrorMessage {
191 msg: msgs::ErrorMessage {
193 data: msg.to_string()
198 needs_channel_force_close: false,
202 fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
205 ChannelError::Ignore(msg) => HandleError {
207 action: Some(msgs::ErrorAction::IgnoreError),
209 ChannelError::Close(msg) => HandleError {
211 action: Some(msgs::ErrorAction::SendErrorMessage {
212 msg: msgs::ErrorMessage {
214 data: msg.to_string()
219 needs_channel_force_close: true,
224 /// Pass to fail_htlc_backwards to indicate the reason to fail the payment
225 /// after a PaymentReceived event.
227 pub enum PaymentFailReason {
228 /// Indicates the preimage for payment_hash is not known after a PaymentReceived event
230 /// Indicates the payment amount is incorrect (the received amount is < expected or > 2*expected) after a PaymentReceived event
234 /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
235 /// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
236 /// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
237 /// probably increase this significantly.
238 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
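// For illustration only (not code used in this module): a holding-cell delay drawn from the
// (MIN, 5*MIN) range described above could be computed as
//     let jitter = rand_u32 % (4 * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS); // rand_u32: assumed RNG output
//     let forward_at = Instant::now()
//         + Duration::from_millis((MIN_HTLC_RELAY_HOLDING_CELL_MILLIS + jitter) as u64);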
240 struct HTLCForwardInfo {
241 prev_short_channel_id: u64,
243 forward_info: PendingForwardHTLCInfo,
246 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
247 /// be sent in the order they appear in the return value. However, sometimes the order needs to be
248 /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
249 /// they were originally sent). In those cases, this enum is also returned.
250 #[derive(Clone, PartialEq)]
251 pub(super) enum RAACommitmentOrder {
252 /// Send the CommitmentUpdate messages first
254 /// Send the RevokeAndACK message first
258 struct ChannelHolder {
259 by_id: HashMap<[u8; 32], Channel>,
260 short_to_id: HashMap<u64, [u8; 32]>,
261 next_forward: Instant,
262 /// short channel id -> forward infos. Key of 0 means payments received
263 /// Note that while this is held in the same mutex as the channels themselves, no consistency
264 /// guarantees are made about there existing a channel with the short id here, nor the short
265 /// ids in the PendingForwardHTLCInfo!
266 forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
267 /// Note that while this is held in the same mutex as the channels themselves, no consistency
268 /// guarantees are made about the channels given here actually existing anymore by the time you go looking them up!
270 claimable_htlcs: HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
271 /// Messages to send to peers - pushed to in the same lock that they are generated in (except
272 /// for broadcast messages, where ordering isn't as strict).
273 pending_msg_events: Vec<events::MessageSendEvent>,
275 struct MutChannelHolder<'a> {
276 by_id: &'a mut HashMap<[u8; 32], Channel>,
277 short_to_id: &'a mut HashMap<u64, [u8; 32]>,
278 next_forward: &'a mut Instant,
279 forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
280 claimable_htlcs: &'a mut HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
281 pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
284 fn borrow_parts(&mut self) -> MutChannelHolder {
286 by_id: &mut self.by_id,
287 short_to_id: &mut self.short_to_id,
288 next_forward: &mut self.next_forward,
289 forward_htlcs: &mut self.forward_htlcs,
290 claimable_htlcs: &mut self.claimable_htlcs,
291 pending_msg_events: &mut self.pending_msg_events,
296 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
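// Deliberate compile-time check: on targets where usize is narrower than 32 bits, assigning a
// &str to a () constant fails to build and surfaces the message below in the compiler error,
// since latest_block_height is stored in an AtomicUsize.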
297 const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
299 /// Manager which keeps track of a number of channels and sends messages to the appropriate
300 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
302 /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
303 /// to individual Channels.
305 /// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
306 /// all peers during write/read (though does not modify this instance, only the instance being
307 /// serialized). This will result in any channels which have not yet exchanged funding_created (ie
308 /// called funding_transaction_generated for outbound channels) being closed.
310 /// Note that you can be a bit lazier about writing out ChannelManager than you can be with
311 /// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
312 /// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates
313 /// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
314 /// the serialization process). If the deserialized version is out-of-date compared to the
315 /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
316 /// ChannelMonitor state and no funds will be lost (modulo on-chain transaction fees).
318 /// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
319 /// tells you the hash of the last block which was connected via block_connected(). You MUST rescan any blocks along
320 /// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
321 /// block_connected() to step towards your best block) upon deserialization before using the ChannelManager.
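/// A minimal persistence sketch (illustrative only; `writer`, `reader` and `read_args` are
/// placeholders for whatever util::ser::Writer, reader and ReadableArgs arguments your
/// integration provides):
/// ```ignore
/// // Serialize: no other ChannelManager operations can run while this holds the big lock.
/// channel_manager.write(&mut writer)?;
/// // Deserialize: returns the last connected block hash alongside the manager.
/// let (last_block_hash, restored): (Sha256dHash, ChannelManager) =
///     ReadableArgs::read(&mut reader, read_args)?;
/// // Rescan the reorg path from last_block_hash (block_disconnected()/block_connected())
/// // before using `restored`.
/// ```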
323 pub struct ChannelManager {
324 default_configuration: UserConfig,
325 genesis_hash: Sha256dHash,
326 fee_estimator: Arc<FeeEstimator>,
327 monitor: Arc<ManyChannelMonitor>,
328 chain_monitor: Arc<ChainWatchInterface>,
329 tx_broadcaster: Arc<BroadcasterInterface>,
331 latest_block_height: AtomicUsize,
332 last_block_hash: Mutex<Sha256dHash>,
333 secp_ctx: Secp256k1<secp256k1::All>,
335 channel_state: Mutex<ChannelHolder>,
336 our_network_key: SecretKey,
338 pending_events: Mutex<Vec<events::Event>>,
339 /// Used when we have to take a BIG lock to make sure everything is self-consistent.
340 /// Essentially just when we're serializing ourselves out.
341 /// Taken first everywhere where we are making changes before any other locks.
342 total_consistency_lock: RwLock<()>,
344 keys_manager: Arc<KeysInterface>,
349 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
350 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
351 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
352 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
353 /// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
354 const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
355 const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
357 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
358 // if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
359 // HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
360 // CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
363 const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;
365 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
366 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
369 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
371 macro_rules! secp_call {
372 ( $res: expr, $err: expr ) => {
375 Err(_) => return Err($err),
382 shared_secret: SharedSecret,
384 blinding_factor: [u8; 32],
385 ephemeral_pubkey: PublicKey,
390 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
391 pub struct ChannelDetails {
392 /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
393 /// thereafter this is the txid of the funding transaction XORed with the funding transaction output index).
394 /// Note that this means this value is *not* persistent - it can change once during the
395 /// lifetime of the channel.
396 pub channel_id: [u8; 32],
397 /// The position of the funding transaction in the chain. None if the funding transaction has
398 /// not yet been confirmed and the channel fully opened.
399 pub short_channel_id: Option<u64>,
400 /// The node_id of our counterparty
401 pub remote_network_id: PublicKey,
402 /// The value, in satoshis, of this channel as it appears in the funding output
403 pub channel_value_satoshis: u64,
404 /// The user_id passed in to create_channel, or 0 if the channel was inbound.
408 impl ChannelManager {
409 /// Constructs a new ChannelManager to hold several channels and route between them.
411 /// This is the main "logic hub" for all channel-related actions, and implements
412 /// ChannelMessageHandler.
414 /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
416 /// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
417 pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>, config: UserConfig) -> Result<Arc<ChannelManager>, secp256k1::Error> {
418 let secp_ctx = Secp256k1::new();
420 let res = Arc::new(ChannelManager {
421 default_configuration: config.clone(),
422 genesis_hash: genesis_block(network).header.bitcoin_hash(),
423 fee_estimator: feeest.clone(),
424 monitor: monitor.clone(),
428 latest_block_height: AtomicUsize::new(0), //TODO: Get an init value
429 last_block_hash: Mutex::new(Default::default()),
432 channel_state: Mutex::new(ChannelHolder{
433 by_id: HashMap::new(),
434 short_to_id: HashMap::new(),
435 next_forward: Instant::now(),
436 forward_htlcs: HashMap::new(),
437 claimable_htlcs: HashMap::new(),
438 pending_msg_events: Vec::new(),
440 our_network_key: keys_manager.get_node_secret(),
442 pending_events: Mutex::new(Vec::new()),
443 total_consistency_lock: RwLock::new(()),
449 let weak_res = Arc::downgrade(&res);
450 res.chain_monitor.register_listener(weak_res);
454 /// Creates a new outbound channel to the given remote node and with the given value.
456 /// user_id will be provided back as user_channel_id in FundingGenerationReady and
457 /// FundingBroadcastSafe events to allow tracking of which events correspond with which
458 /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
459 /// may wish to avoid using 0 for user_id here.
461 /// If successful, will generate a SendOpenChannel message event, so you should probably poll
462 /// PeerManager::process_events afterwards.
464 /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
465 /// greater than channel_value_satoshis * 1000 (ie the channel value in millisatoshis) or channel_value_satoshis is < 1000.
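/// A minimal usage sketch (values and `their_network_key` are illustrative):
/// ```ignore
/// // Open a 100,000 satoshi channel, pushing nothing to the counterparty, tagged with
/// // user_id 42 so later FundingGenerationReady events can be matched to this call.
/// channel_manager.create_channel(their_network_key, 100_000, 0, 42)?;
/// // A SendOpenChannel message event is now pending; poll PeerManager::process_events
/// // (or your equivalent) to actually send it.
/// ```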
466 pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
467 if channel_value_satoshis < 1000 {
468 return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
471 let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?;
472 let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
474 let _ = self.total_consistency_lock.read().unwrap();
475 let mut channel_state = self.channel_state.lock().unwrap();
476 match channel_state.by_id.entry(channel.channel_id()) {
477 hash_map::Entry::Occupied(_) => {
478 if cfg!(feature = "fuzztarget") {
479 return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
481 panic!("RNG is bad???");
484 hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
486 channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
487 node_id: their_network_key,
493 /// Gets the list of open channels, in random order. See ChannelDetails field documentation for
494 /// more information.
495 pub fn list_channels(&self) -> Vec<ChannelDetails> {
496 let channel_state = self.channel_state.lock().unwrap();
497 let mut res = Vec::with_capacity(channel_state.by_id.len());
498 for (channel_id, channel) in channel_state.by_id.iter() {
499 res.push(ChannelDetails {
500 channel_id: (*channel_id).clone(),
501 short_channel_id: channel.get_short_channel_id(),
502 remote_network_id: channel.get_their_node_id(),
503 channel_value_satoshis: channel.get_value_satoshis(),
504 user_id: channel.get_user_id(),
510 /// Gets the list of usable channels, in random order. Useful as an argument to
511 /// Router::get_route to ensure non-announced channels are used.
512 pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
513 let channel_state = self.channel_state.lock().unwrap();
514 let mut res = Vec::with_capacity(channel_state.by_id.len());
515 for (channel_id, channel) in channel_state.by_id.iter() {
516 // Note we use is_live here instead of usable which leads to somewhat confused
517 // internal/external nomenclature, but that's ok cause that's probably what the user
518 // really wanted anyway.
519 if channel.is_live() {
520 res.push(ChannelDetails {
521 channel_id: (*channel_id).clone(),
522 short_channel_id: channel.get_short_channel_id(),
523 remote_network_id: channel.get_their_node_id(),
524 channel_value_satoshis: channel.get_value_satoshis(),
525 user_id: channel.get_user_id(),
532 /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
533 /// will be accepted on the given channel, and after additional timeout/the closing of all
534 /// pending HTLCs, the channel will be closed on chain.
536 /// May generate a SendShutdown message event on success, which should be relayed.
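/// For example (illustrative):
/// ```ignore
/// channel_manager.close_channel(&channel_id)?;
/// // A SendShutdown message event may now be pending; relay it via PeerManager::process_events
/// // (or your equivalent message handler).
/// ```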
537 pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
538 let _ = self.total_consistency_lock.read().unwrap();
540 let (mut failed_htlcs, chan_option) = {
541 let mut channel_state_lock = self.channel_state.lock().unwrap();
542 let channel_state = channel_state_lock.borrow_parts();
543 match channel_state.by_id.entry(channel_id.clone()) {
544 hash_map::Entry::Occupied(mut chan_entry) => {
545 let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
546 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
547 node_id: chan_entry.get().get_their_node_id(),
550 if chan_entry.get().is_shutdown() {
551 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
552 channel_state.short_to_id.remove(&short_id);
554 (failed_htlcs, Some(chan_entry.remove_entry().1))
555 } else { (failed_htlcs, None) }
557 hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
560 for htlc_source in failed_htlcs.drain(..) {
561 // unknown_next_peer...I dunno who that is anymore....
562 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
564 let chan_update = if let Some(chan) = chan_option {
565 if let Ok(update) = self.get_channel_update(&chan) {
570 if let Some(update) = chan_update {
571 let mut channel_state = self.channel_state.lock().unwrap();
572 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
581 fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>)) {
582 let (local_txn, mut failed_htlcs) = shutdown_res;
583 for htlc_source in failed_htlcs.drain(..) {
584 // unknown_next_peer...I dunno who that is anymore....
585 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
587 for tx in local_txn {
588 self.tx_broadcaster.broadcast_transaction(&tx);
590 //TODO: We need to have a way where outbound HTLC claims can result in us claiming the
591 //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
592 //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
593 //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
594 //timeouts are hit and our claims confirm).
595 //TODO: In any case, we need to make sure we remove any pending htlc tracking (via
596 //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
599 /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
600 /// the chain and rejecting new HTLCs on the given channel.
601 pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
602 let _ = self.total_consistency_lock.read().unwrap();
605 let mut channel_state_lock = self.channel_state.lock().unwrap();
606 let channel_state = channel_state_lock.borrow_parts();
607 if let Some(chan) = channel_state.by_id.remove(channel_id) {
608 if let Some(short_id) = chan.get_short_channel_id() {
609 channel_state.short_to_id.remove(&short_id);
616 self.finish_force_close_channel(chan.force_shutdown());
617 if let Ok(update) = self.get_channel_update(&chan) {
618 let mut channel_state = self.channel_state.lock().unwrap();
619 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
625 /// Force close all channels, immediately broadcasting the latest local commitment transaction
626 /// for each to the chain and rejecting new HTLCs on each.
627 pub fn force_close_all_channels(&self) {
628 for chan in self.list_channels() {
629 self.force_close_channel(&chan.channel_id);
633 fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
635 ChannelMonitorUpdateErr::PermanentFailure => {
637 let channel_state = channel_state_lock.borrow_parts();
638 let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
639 if let Some(short_id) = chan.get_short_channel_id() {
640 channel_state.short_to_id.remove(&short_id);
644 mem::drop(channel_state_lock);
645 self.finish_force_close_channel(chan.force_shutdown());
646 if let Ok(update) = self.get_channel_update(&chan) {
647 let mut channel_state = self.channel_state.lock().unwrap();
648 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
653 ChannelMonitorUpdateErr::TemporaryFailure => {
654 let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
655 channel.monitor_update_failed(reason);
661 fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
662 assert_eq!(shared_secret.len(), 32);
664 let mut hmac = Hmac::new(Sha256::new(), &[0x72, 0x68, 0x6f]); // rho
665 hmac.input(&shared_secret[..]);
666 let mut res = [0; 32];
667 hmac.raw_result(&mut res);
671 let mut hmac = Hmac::new(Sha256::new(), &[0x6d, 0x75]); // mu
672 hmac.input(&shared_secret[..]);
673 let mut res = [0; 32];
674 hmac.raw_result(&mut res);
680 fn gen_um_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
681 assert_eq!(shared_secret.len(), 32);
682 let mut hmac = Hmac::new(Sha256::new(), &[0x75, 0x6d]); // um
683 hmac.input(&shared_secret[..]);
684 let mut res = [0; 32];
685 hmac.raw_result(&mut res);
690 fn gen_ammag_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
691 assert_eq!(shared_secret.len(), 32);
692 let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
693 hmac.input(&shared_secret[..]);
694 let mut res = [0; 32];
695 hmac.raw_result(&mut res);
699 // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
701 fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(SharedSecret, [u8; 32], PublicKey, &RouteHop)> (secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey, mut callback: FType) -> Result<(), secp256k1::Error> {
702 let mut blinded_priv = session_priv.clone();
703 let mut blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
705 for hop in route.hops.iter() {
706 let shared_secret = SharedSecret::new(secp_ctx, &hop.pubkey, &blinded_priv);
708 let mut sha = Sha256::new();
709 sha.input(&blinded_pub.serialize()[..]);
710 sha.input(&shared_secret[..]);
711 let mut blinding_factor = [0u8; 32];
712 sha.result(&mut blinding_factor);
714 let ephemeral_pubkey = blinded_pub;
716 blinded_priv.mul_assign(secp_ctx, &SecretKey::from_slice(secp_ctx, &blinding_factor)?)?;
717 blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
719 callback(shared_secret, blinding_factor, ephemeral_pubkey, hop);
725 // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
726 fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, secp256k1::Error> {
727 let mut res = Vec::with_capacity(route.hops.len());
729 Self::construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _| {
730 let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret[..]);
736 blinding_factor: _blinding_factor,
746 /// returns the hop data, as well as the first-hop value_msat and CLTV value we should send.
747 fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
748 let mut cur_value_msat = 0u64;
749 let mut cur_cltv = starting_htlc_offset;
750 let mut last_short_channel_id = 0;
751 let mut res: Vec<msgs::OnionHopData> = Vec::with_capacity(route.hops.len());
752 internal_traits::test_no_dealloc::<msgs::OnionHopData>(None);
753 unsafe { res.set_len(route.hops.len()); }
755 for (idx, hop) in route.hops.iter().enumerate().rev() {
756 // The final hop gets special values so that it can check, on receipt, that everything is
757 // exactly as it should be (and that the hop before it isn't trying to probe whether it is
758 // the intended recipient).
759 let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
760 let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
761 res[idx] = msgs::OnionHopData {
763 data: msgs::OnionRealm0HopData {
764 short_channel_id: last_short_channel_id,
765 amt_to_forward: value_msat,
766 outgoing_cltv_value: cltv,
770 cur_value_msat += hop.fee_msat;
771 if cur_value_msat >= 21000000 * 100000000 * 1000 {
772 return Err(APIError::RouteError{err: "Channel fees overflowed?!"});
774 cur_cltv += hop.cltv_expiry_delta as u32;
775 if cur_cltv >= 500000000 {
776 return Err(APIError::RouteError{err: "Channel CLTV overflowed?!"});
778 last_short_channel_id = hop.short_channel_id;
780 Ok((res, cur_value_msat, cur_cltv))
784 fn shift_arr_right(arr: &mut [u8; 20*65]) {
786 ptr::copy(arr[0..].as_ptr(), arr[65..].as_mut_ptr(), 19*65);
794 fn xor_bufs(dst: &mut[u8], src: &[u8]) {
795 assert_eq!(dst.len(), src.len());
797 for i in 0..dst.len() {
802 const ZERO:[u8; 21*65] = [0; 21*65];
803 fn construct_onion_packet(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, associated_data: &[u8; 32]) -> msgs::OnionPacket {
804 let mut buf = Vec::with_capacity(21*65);
805 buf.resize(21*65, 0);
808 let iters = payloads.len() - 1;
809 let end_len = iters * 65;
810 let mut res = Vec::with_capacity(end_len);
811 res.resize(end_len, 0);
813 for (i, keys) in onion_keys.iter().enumerate() {
814 if i == payloads.len() - 1 { continue; }
815 let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
816 chacha.process(&ChannelManager::ZERO, &mut buf); // We don't have a seek function :(
817 ChannelManager::xor_bufs(&mut res[0..(i + 1)*65], &buf[(20 - i)*65..21*65]);
822 let mut packet_data = [0; 20*65];
823 let mut hmac_res = [0; 32];
825 for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
826 ChannelManager::shift_arr_right(&mut packet_data);
827 payload.hmac = hmac_res;
828 packet_data[0..65].copy_from_slice(&payload.encode()[..]);
830 let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
831 chacha.process(&packet_data, &mut buf[0..20*65]);
832 packet_data[..].copy_from_slice(&buf[0..20*65]);
835 packet_data[20*65 - filler.len()..20*65].copy_from_slice(&filler[..]);
838 let mut hmac = Hmac::new(Sha256::new(), &keys.mu);
839 hmac.input(&packet_data);
840 hmac.input(&associated_data[..]);
841 hmac.raw_result(&mut hmac_res);
846 public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
847 hop_data: packet_data,
852 /// Encrypts a failure packet. raw_packet can either be a
853 /// msgs::DecodedOnionErrorPacket.encode() result or a msgs::OnionErrorPacket.data element.
854 fn encrypt_failure_packet(shared_secret: &[u8], raw_packet: &[u8]) -> msgs::OnionErrorPacket {
855 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
857 let mut packet_crypted = Vec::with_capacity(raw_packet.len());
858 packet_crypted.resize(raw_packet.len(), 0);
859 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
860 chacha.process(&raw_packet, &mut packet_crypted[..]);
861 msgs::OnionErrorPacket {
862 data: packet_crypted,
866 fn build_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket {
867 assert_eq!(shared_secret.len(), 32);
868 assert!(failure_data.len() <= 256 - 2);
870 let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
873 let mut res = Vec::with_capacity(2 + failure_data.len());
874 res.push(((failure_type >> 8) & 0xff) as u8);
875 res.push(((failure_type >> 0) & 0xff) as u8);
876 res.extend_from_slice(&failure_data[..]);
880 let mut res = Vec::with_capacity(256 - 2 - failure_data.len());
881 res.resize(256 - 2 - failure_data.len(), 0);
884 let mut packet = msgs::DecodedOnionErrorPacket {
886 failuremsg: failuremsg,
890 let mut hmac = Hmac::new(Sha256::new(), &um);
891 hmac.input(&packet.encode()[32..]);
892 hmac.raw_result(&mut packet.hmac);
898 fn build_first_hop_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
899 let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data);
900 ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
903 fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
904 macro_rules! get_onion_hash {
907 let mut sha = Sha256::new();
908 sha.input(&msg.onion_routing_packet.hop_data);
909 let mut onion_hash = [0; 32];
910 sha.result(&mut onion_hash);
916 if let Err(_) = msg.onion_routing_packet.public_key {
917 log_info!(self, "Failed to accept/forward incoming HTLC with invalid ephemeral pubkey");
918 return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
919 channel_id: msg.channel_id,
920 htlc_id: msg.htlc_id,
921 sha256_of_onion: get_onion_hash!(),
922 failure_code: 0x8000 | 0x4000 | 6,
923 })), self.channel_state.lock().unwrap());
926 let shared_secret = {
927 let mut arr = [0; 32];
928 arr.copy_from_slice(&SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
931 let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
933 let mut channel_state = None;
934 macro_rules! return_err {
935 ($msg: expr, $err_code: expr, $data: expr) => {
937 log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
938 if channel_state.is_none() {
939 channel_state = Some(self.channel_state.lock().unwrap());
941 return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
942 channel_id: msg.channel_id,
943 htlc_id: msg.htlc_id,
944 reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
945 })), channel_state.unwrap());
950 if msg.onion_routing_packet.version != 0 {
951 //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
952 //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
953 //the hash doesn't really serve any purpose - in the case of hashing all data, the
954 //receiving node would have to brute force to figure out which version was put in the
955 //packet by the node that sent us the message, in the case of hashing the hop_data, the
956 //node knows the HMAC matched, so they already know what is there...
957 return_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4, &get_onion_hash!());
960 let mut hmac = Hmac::new(Sha256::new(), &mu);
961 hmac.input(&msg.onion_routing_packet.hop_data);
962 hmac.input(&msg.payment_hash);
963 if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) {
964 return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!());
967 let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
968 let next_hop_data = {
969 let mut decoded = [0; 65];
970 chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
971 match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
973 let error_code = match err {
974 msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
975 _ => 0x2000 | 2, // Should never happen
977 return_err!("Unable to decode our hop data", error_code, &[0;0]);
983 let pending_forward_info = if next_hop_data.hmac == [0; 32] {
985 // final_expiry_too_soon
986 if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
987 return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
989 // final_incorrect_htlc_amount
990 if next_hop_data.data.amt_to_forward > msg.amount_msat {
991 return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
993 // final_incorrect_cltv_expiry
994 if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
995 return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
998 // Note that we could obviously respond immediately with an update_fulfill_htlc
999 // message, however that would leak that we are the recipient of this payment, so
1000 // instead we stay symmetric with the forwarding case, only responding (after a
1001 // delay) once they've sent us a commitment_signed!
1003 PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
1005 payment_hash: msg.payment_hash.clone(),
1006 short_channel_id: 0,
1007 incoming_shared_secret: shared_secret,
1008 amt_to_forward: next_hop_data.data.amt_to_forward,
1009 outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
1012 let mut new_packet_data = [0; 20*65];
1013 chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
1014 chacha.process(&ChannelManager::ZERO[0..65], &mut new_packet_data[19*65..]);
1016 let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
1018 let blinding_factor = {
1019 let mut sha = Sha256::new();
1020 sha.input(&new_pubkey.serialize()[..]);
1021 sha.input(&shared_secret);
1022 let mut res = [0u8; 32];
1023 sha.result(&mut res);
1024 match SecretKey::from_slice(&self.secp_ctx, &res) {
1026 return_err!("Blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
1032 if let Err(_) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
1033 return_err!("New blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
1036 let outgoing_packet = msgs::OnionPacket {
1038 public_key: Ok(new_pubkey),
1039 hop_data: new_packet_data,
1040 hmac: next_hop_data.hmac.clone(),
1043 PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
1044 onion_packet: Some(outgoing_packet),
1045 payment_hash: msg.payment_hash.clone(),
1046 short_channel_id: next_hop_data.data.short_channel_id,
1047 incoming_shared_secret: shared_secret,
1048 amt_to_forward: next_hop_data.data.amt_to_forward,
1049 outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
1053 channel_state = Some(self.channel_state.lock().unwrap());
1054 if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
1055 if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
1056 let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
1057 let forwarding_id = match id_option {
1058 None => { // unknown_next_peer
1059 return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
1061 Some(id) => id.clone(),
1063 if let Some((err, code, chan_update)) = loop {
1064 let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
1066 // Note that we could technically not return an error yet here and just hope
1067 // that the connection is reestablished or monitor updated by the time we get
1068 // around to doing the actual forward, but better to fail early if we can and
1069 // hopefully an attacker trying to path-trace payments cannot make this occur
1070 // on a small/per-node/per-channel scale.
1071 if !chan.is_live() { // channel_disabled
1072 break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
1074 if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
1075 break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
1077 let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
1078 if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
1079 break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
1081 if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
1082 break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
1084 let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
1085 // We want at least HTLC_FAIL_TIMEOUT_BLOCKS to fail the HTLC backwards before we go on chain, and we go on chain CLTV_CLAIM_BUFFER blocks before expiration
1086 if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
1087 break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
1089 if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
1090 break Some(("CLTV expiry is too far in the future", 21, None));
1095 let mut res = Vec::with_capacity(8 + 128);
1096 if code == 0x1000 | 11 || code == 0x1000 | 12 {
1097 res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
1099 else if code == 0x1000 | 13 {
1100 res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
1102 if let Some(chan_update) = chan_update {
1103 res.extend_from_slice(&chan_update.encode_with_len()[..]);
1105 return_err!(err, code, &res[..]);
1110 (pending_forward_info, channel_state.unwrap())
1113 /// only fails if the channel does not yet have an assigned short_id
1114 /// May be called with channel_state already locked!
1115 fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
1116 let short_channel_id = match chan.get_short_channel_id() {
1117 None => return Err(HandleError{err: "Channel not yet established", action: None}),
1121 let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
1123 let unsigned = msgs::UnsignedChannelUpdate {
1124 chain_hash: self.genesis_hash,
1125 short_channel_id: short_channel_id,
1126 timestamp: chan.get_channel_update_count(),
1127 flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
1128 cltv_expiry_delta: CLTV_EXPIRY_DELTA,
1129 htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
1130 fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
1131 fee_proportional_millionths: chan.get_fee_proportional_millionths(),
1132 excess_data: Vec::new(),
1135 let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
1136 let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
1138 Ok(msgs::ChannelUpdate {
1144 /// Sends a payment along a given route.
1146 /// Value parameters are provided via the last hop in route, see documentation for RouteHop
1147 /// fields for more info.
1149 /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
1150 /// payment), we don't do anything to stop you! We always try to ensure that if the provided
1151 /// next hop knows the preimage to payment_hash they can claim an additional amount as
1152 /// specified in the last hop in the route! Thus, you should probably do your own
1153 /// payment_preimage tracking (which you should already be doing as they represent "proof of
1154 /// payment") and prevent double-sends yourself.
1156 /// May generate a SendHTLCs message event on success, which should be relayed.
1158 /// Raises APIError::RouteError when an invalid route or forwarding parameter
1159 /// (cltv_delta, fee, node public key) is specified.
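/// A minimal usage sketch (the route is assumed to come from ln::router and payment_hash from the
/// recipient's invoice; both are placeholders here):
/// ```ignore
/// match channel_manager.send_payment(route, payment_hash) {
///     // An UpdateHTLCs message event is now pending; poll PeerManager::process_events.
///     Ok(()) => {},
///     // The HTLC may still go out once the pending monitor update completes, so don't
///     // blindly retry with the same payment_hash.
///     Err(APIError::MonitorUpdateFailed) => {},
///     // The route/first-hop channel was unusable; retrying over a different route is safe.
///     Err(_e) => {},
/// }
/// ```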
1160 pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
1161 if route.hops.len() < 1 || route.hops.len() > 20 {
1162 return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
1164 let our_node_id = self.get_our_node_id();
1165 for (idx, hop) in route.hops.iter().enumerate() {
1166 if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
1167 return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
1171 let session_priv = SecretKey::from_slice(&self.secp_ctx, &{
1172 let mut session_key = [0; 32];
1173 rng::fill_bytes(&mut session_key);
1175 }).expect("RNG is bad!");
1177 let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
1179 let onion_keys = secp_call!(ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
1180 APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
1181 let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
1182 let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
1184 let _ = self.total_consistency_lock.read().unwrap();
1185 let mut channel_state = self.channel_state.lock().unwrap();
1187 let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
1188 None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
1189 Some(id) => id.clone(),
1193 let chan = channel_state.by_id.get_mut(&id).unwrap();
1194 if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
1195 return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
1197 if chan.is_awaiting_monitor_update() {
1198 return Err(APIError::MonitorUpdateFailed);
1200 if !chan.is_live() {
1201 return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
1203 chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
1204 route: route.clone(),
1205 session_priv: session_priv.clone(),
1206 first_hop_htlc_msat: htlc_msat,
1207 }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
1210 Some((update_add, commitment_signed, chan_monitor)) => {
1211 if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1212 self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst);
1213 return Err(APIError::MonitorUpdateFailed);
1216 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1217 node_id: route.hops.first().unwrap().pubkey,
1218 updates: msgs::CommitmentUpdate {
1219 update_add_htlcs: vec![update_add],
1220 update_fulfill_htlcs: Vec::new(),
1221 update_fail_htlcs: Vec::new(),
1222 update_fail_malformed_htlcs: Vec::new(),
1234 /// Call this upon creation of a funding transaction for the given channel.
1236 /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
1237 /// or your counterparty can steal your funds!
1239 /// Panics if a funding transaction has already been provided for this channel.
1241 /// May panic if the funding_txo is duplicative with some other channel (note that this should
1242 /// be trivially prevented by using unique funding transaction keys per-channel).
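/// A rough sketch of the intended flow (the FundingGenerationReady field names, `funding_txid`,
/// `funding_output_index` and the OutPoint construction are illustrative assumptions):
/// ```ignore
/// // In response to Event::FundingGenerationReady { temporary_channel_id, output_script, .. }:
/// // build and sign a transaction paying output_script (ALL inputs must spend SegWit outputs),
/// // note its txid and the index of that output, then:
/// let funding_txo = OutPoint { txid: funding_txid, index: funding_output_index };
/// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_txo);
/// // Only broadcast the funding transaction once the FundingBroadcastSafe event fires.
/// ```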
1243 pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
1244 let _ = self.total_consistency_lock.read().unwrap();
1246 let (chan, msg, chan_monitor) = {
1247 let mut channel_state = self.channel_state.lock().unwrap();
1248 match channel_state.by_id.remove(temporary_channel_id) {
1250 match chan.get_outbound_funding_created(funding_txo) {
1251 Ok(funding_msg) => {
1252 (chan, funding_msg.0, funding_msg.1)
1255 log_error!(self, "Got bad signatures: {}!", e.err);
1256 channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
1257 node_id: chan.get_their_node_id(),
1267 // Because we have exclusive ownership of the channel here we can release the channel_state
1268 // lock before add_update_monitor
1269 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1273 let mut channel_state = self.channel_state.lock().unwrap();
1274 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
1275 node_id: chan.get_their_node_id(),
1278 match channel_state.by_id.entry(chan.channel_id()) {
1279 hash_map::Entry::Occupied(_) => {
1280 panic!("Generated duplicate funding txid?");
1282 hash_map::Entry::Vacant(e) => {
1288 fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
1289 if !chan.should_announce() { return None }
1291 let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
1293 Err(_) => return None, // Only in case of state precondition violations eg channel is closing
1295 let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
1296 let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
1298 Some(msgs::AnnouncementSignatures {
1299 channel_id: chan.channel_id(),
1300 short_channel_id: chan.get_short_channel_id().unwrap(),
1301 node_signature: our_node_sig,
1302 bitcoin_signature: our_bitcoin_sig,
1306 /// Processes HTLCs which are pending, waiting on a random forward delay to elapse.
1308 /// Should only really ever be called in response to a PendingHTLCsForwardable event.
1309 /// Will likely generate further events.
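/// For example (illustrative; the PendingHTLCsForwardable event is assumed to carry a
/// `time_forwardable` duration):
/// ```ignore
/// // On Event::PendingHTLCsForwardable { time_forwardable }:
/// // wait at least time_forwardable (a random extra delay preserves a little privacy), then:
/// channel_manager.process_pending_htlc_forwards();
/// // New message events and/or PaymentReceived events may now be pending.
/// ```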
1310 pub fn process_pending_htlc_forwards(&self) {
1311 let _ = self.total_consistency_lock.read().unwrap();
1313 let mut new_events = Vec::new();
1314 let mut failed_forwards = Vec::new();
1316 let mut channel_state_lock = self.channel_state.lock().unwrap();
1317 let channel_state = channel_state_lock.borrow_parts();
1319 if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
1323 for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
1324 if short_chan_id != 0 {
1325 let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
1326 Some(chan_id) => chan_id.clone(),
1328 failed_forwards.reserve(pending_forwards.len());
1329 for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1330 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
1331 short_channel_id: prev_short_channel_id,
1332 htlc_id: prev_htlc_id,
1333 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1335 failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
1340 let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
1342 let mut add_htlc_msgs = Vec::new();
1343 for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1344 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
1345 short_channel_id: prev_short_channel_id,
1346 htlc_id: prev_htlc_id,
1347 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1349 match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
1351 let chan_update = self.get_channel_update(forward_chan).unwrap();
1352 failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
1357 Some(msg) => { add_htlc_msgs.push(msg); },
1359 // Nothing to do here...we're waiting on a remote
1360 // revoke_and_ack before we can add anymore HTLCs. The Channel
1361 // will automatically handle building the update_add_htlc and
1362 // commitment_signed messages when we can.
1363 // TODO: Do some kind of timer to set the channel as !is_live()
1364 // as we don't really want others relying on us relaying through
1365 // this channel currently :/.
1372 if !add_htlc_msgs.is_empty() {
1373 let (commitment_msg, monitor) = match forward_chan.send_commitment() {
1376 if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action {
1377 } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action {
1379 panic!("Stated return value requirements in send_commitment() were not met");
1381 //TODO: Handle...this is bad!
1385 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
1386 unimplemented!();// but def dont push the event...
1388 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1389 node_id: forward_chan.get_their_node_id(),
1390 updates: msgs::CommitmentUpdate {
1391 update_add_htlcs: add_htlc_msgs,
1392 update_fulfill_htlcs: Vec::new(),
1393 update_fail_htlcs: Vec::new(),
1394 update_fail_malformed_htlcs: Vec::new(),
1396 commitment_signed: commitment_msg,
1401 for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1402 let prev_hop_data = HTLCPreviousHopData {
1403 short_channel_id: prev_short_channel_id,
1404 htlc_id: prev_htlc_id,
1405 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1407 match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
1408 hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
1409 hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
1411 new_events.push(events::Event::PaymentReceived {
1412 payment_hash: forward_info.payment_hash,
1413 amt: forward_info.amt_to_forward,
1420 for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
1422 None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
1423 Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
1427 if new_events.is_empty() { return }
1428 let mut events = self.pending_events.lock().unwrap();
1429 events.append(&mut new_events);
1432 /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event.
1433 pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool {
1434 let _ = self.total_consistency_lock.read().unwrap();
1436 let mut channel_state = Some(self.channel_state.lock().unwrap());
1437 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
1438 if let Some(mut sources) = removed_source {
1439 for htlc_with_hash in sources.drain(..) {
1440 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1441 self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: if reason == PaymentFailReason::PreimageUnknown {0x4000 | 15} else {0x4000 | 16}, data: Vec::new() });
1447 /// Fails an HTLC backwards to the node that sent it to us.
1448 /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
1449 /// There are several callsites that do stupid things like loop over a list of payment_hashes
1450 /// to fail and take the channel_state lock for each iteration (as we take ownership and may
1451 /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
1452 /// still-available channels.
1453 fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) {
1455 HTLCSource::OutboundRoute { .. } => {
1456 mem::drop(channel_state_lock);
1457 if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
1458 let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
1459 if let Some(update) = channel_update {
1460 self.channel_state.lock().unwrap().pending_msg_events.push(
1461 events::MessageSendEvent::PaymentFailureNetworkUpdate {
1466 self.pending_events.lock().unwrap().push(events::Event::PaymentFailed {
1467 payment_hash: payment_hash.clone(),
1468 rejected_by_dest: !payment_retryable,
1471 panic!("should have onion error packet here");
1474 HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
1475 let err_packet = match onion_error {
1476 HTLCFailReason::Reason { failure_code, data } => {
1477 let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
1478 ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
1480 HTLCFailReason::ErrorPacket { err } => {
1481 ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
1485 let channel_state = channel_state_lock.borrow_parts();
1487 let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1488 Some(chan_id) => chan_id.clone(),
1492 let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1493 match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
1494 Ok(Some((msg, commitment_msg, chan_monitor))) => {
1495 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1498 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1499 node_id: chan.get_their_node_id(),
1500 updates: msgs::CommitmentUpdate {
1501 update_add_htlcs: Vec::new(),
1502 update_fulfill_htlcs: Vec::new(),
1503 update_fail_htlcs: vec![msg],
1504 update_fail_malformed_htlcs: Vec::new(),
1506 commitment_signed: commitment_msg,
1512 //TODO: Do something with e?
1520 /// Provides a payment preimage in response to a PaymentReceived event, returning true and
1521 /// generating message events for the net layer to claim the payment, if possible. Thus, you
1522 /// should probably kick the net layer to go send messages if this returns true!
1524 /// May panic if called except in response to a PaymentReceived event.
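///
/// A minimal usage sketch (hedged: `channel_manager` and `payment_preimage` are hypothetical
/// bindings; the preimage is assumed to hash to the payment_hash of a PaymentReceived event
/// we previously surfaced):
/// ```ignore
/// // `channel_manager` and `payment_preimage` are hypothetical (see above).
/// if channel_manager.claim_funds(payment_preimage) {
///     // Claim messages were queued for our peers; poll PeerManager::process_events to send them.
/// }
/// ```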
1525 pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
1526 let mut sha = Sha256::new();
1527 sha.input(&payment_preimage);
1528 let mut payment_hash = [0; 32];
1529 sha.result(&mut payment_hash);
1531 let _ = self.total_consistency_lock.read().unwrap();
1533 let mut channel_state = Some(self.channel_state.lock().unwrap());
1534 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
1535 if let Some(mut sources) = removed_source {
1536 for htlc_with_hash in sources.drain(..) {
1537 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1538 self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
1543 fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: [u8; 32]) {
1545 HTLCSource::OutboundRoute { .. } => {
1546 mem::drop(channel_state_lock);
1547 let mut pending_events = self.pending_events.lock().unwrap();
1548 pending_events.push(events::Event::PaymentSent {
1552 HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
1553 //TODO: Delay the claimed_funds relaying just like we do outbound relay!
1554 let channel_state = channel_state_lock.borrow_parts();
1556 let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1557 Some(chan_id) => chan_id.clone(),
1559 // TODO: There is probably a channel monitor somewhere that needs to
1560 // learn the preimage as the channel already hit the chain and that's
1566 let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1567 match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
1568 Ok((msgs, monitor_option)) => {
1569 if let Some(chan_monitor) = monitor_option {
1570 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1571 unimplemented!(); // but definitely don't push the event...
1574 if let Some((msg, commitment_signed)) = msgs {
1575 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1576 node_id: chan.get_their_node_id(),
1577 updates: msgs::CommitmentUpdate {
1578 update_add_htlcs: Vec::new(),
1579 update_fulfill_htlcs: vec![msg],
1580 update_fail_htlcs: Vec::new(),
1581 update_fail_malformed_htlcs: Vec::new(),
1589 // TODO: There is probably a channel monitor somewhere that needs to
1590 // learn the preimage as the channel may be about to hit the chain.
1591 //TODO: Do something with e?
1599 /// Gets the node_id held by this ChannelManager
1600 pub fn get_our_node_id(&self) -> PublicKey {
1601 PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
1604 /// Used to restore channels to normal operation after a
1605 /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
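///
/// A rough calling sketch (hedged: assumes a hypothetical `channel_manager` whose last
/// monitor update returned ChannelMonitorUpdateErr::TemporaryFailure and whose channel
/// monitor can now accept updates again):
/// ```ignore
/// // `channel_manager` is hypothetical (see above).
/// channel_manager.test_restore_channel_monitor();
/// // Restored channels resume normal operation; held-back forwards and failures are replayed.
/// ```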
1607 pub fn test_restore_channel_monitor(&self) {
1608 let mut close_results = Vec::new();
1609 let mut htlc_forwards = Vec::new();
1610 let mut htlc_failures = Vec::new();
1611 let _ = self.total_consistency_lock.read().unwrap();
1614 let mut channel_lock = self.channel_state.lock().unwrap();
1615 let channel_state = channel_lock.borrow_parts();
1616 let short_to_id = channel_state.short_to_id;
1617 let pending_msg_events = channel_state.pending_msg_events;
1618 channel_state.by_id.retain(|_, channel| {
1619 if channel.is_awaiting_monitor_update() {
1620 let chan_monitor = channel.channel_monitor();
1621 if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1623 ChannelMonitorUpdateErr::PermanentFailure => {
1624 if let Some(short_id) = channel.get_short_channel_id() {
1625 short_to_id.remove(&short_id);
1627 close_results.push(channel.force_shutdown());
1628 if let Ok(update) = self.get_channel_update(&channel) {
1629 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1635 ChannelMonitorUpdateErr::TemporaryFailure => true,
1638 let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored();
1639 if !pending_forwards.is_empty() {
1640 htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
1642 htlc_failures.append(&mut pending_failures);
1644 macro_rules! handle_cs { () => {
1645 if let Some(update) = commitment_update {
1646 pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1647 node_id: channel.get_their_node_id(),
1652 macro_rules! handle_raa { () => {
1653 if let Some(revoke_and_ack) = raa {
1654 pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
1655 node_id: channel.get_their_node_id(),
1656 msg: revoke_and_ack,
1661 RAACommitmentOrder::CommitmentFirst => {
1665 RAACommitmentOrder::RevokeAndACKFirst => {
1676 for failure in htlc_failures.drain(..) {
1677 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
1679 self.forward_htlcs(&mut htlc_forwards[..]);
1681 for res in close_results.drain(..) {
1682 self.finish_force_close_channel(res);
1686 fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
1687 if msg.chain_hash != self.genesis_hash {
1688 return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
1691 let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration)
1692 .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
1693 let mut channel_state_lock = self.channel_state.lock().unwrap();
1694 let channel_state = channel_state_lock.borrow_parts();
1695 match channel_state.by_id.entry(channel.channel_id()) {
1696 hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
1697 hash_map::Entry::Vacant(entry) => {
1698 channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
1699 node_id: their_node_id.clone(),
1700 msg: channel.get_accept_channel(),
1702 entry.insert(channel);
1708 fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
1709 let (value, output_script, user_id) = {
1710 let mut channel_state = self.channel_state.lock().unwrap();
1711 match channel_state.by_id.get_mut(&msg.temporary_channel_id) {
1713 if chan.get_their_node_id() != *their_node_id {
1714 //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
1715 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1717 chan.accept_channel(&msg, &self.default_configuration)
1718 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
1719 (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
1721 //TODO: same as above
1722 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1725 let mut pending_events = self.pending_events.lock().unwrap();
1726 pending_events.push(events::Event::FundingGenerationReady {
1727 temporary_channel_id: msg.temporary_channel_id,
1728 channel_value_satoshis: value,
1729 output_script: output_script,
1730 user_channel_id: user_id,
1735 fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
1736 let (chan, funding_msg, monitor_update) = {
1737 let mut channel_state = self.channel_state.lock().unwrap();
1738 match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
1739 hash_map::Entry::Occupied(mut chan) => {
1740 if chan.get().get_their_node_id() != *their_node_id {
1741 //TODO: here and below MsgHandleErrInternal, #153 case
1742 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1744 match chan.get_mut().funding_created(msg) {
1745 Ok((funding_msg, monitor_update)) => {
1746 (chan.remove(), funding_msg, monitor_update)
1749 return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1753 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1756 // Because we have exclusive ownership of the channel here we can release the channel_state
1757 // lock before add_update_monitor
1758 if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
1761 let mut channel_state_lock = self.channel_state.lock().unwrap();
1762 let channel_state = channel_state_lock.borrow_parts();
1763 match channel_state.by_id.entry(funding_msg.channel_id) {
1764 hash_map::Entry::Occupied(_) => {
1765 return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
1767 hash_map::Entry::Vacant(e) => {
1768 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
1769 node_id: their_node_id.clone(),
1778 fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
1779 let (funding_txo, user_id) = {
1780 let mut channel_state = self.channel_state.lock().unwrap();
1781 match channel_state.by_id.get_mut(&msg.channel_id) {
1783 if chan.get_their_node_id() != *their_node_id {
1784 //TODO: here and below MsgHandleErrInternal, #153 case
1785 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1787 let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1788 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1791 (chan.get_funding_txo().unwrap(), chan.get_user_id())
1793 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1796 let mut pending_events = self.pending_events.lock().unwrap();
1797 pending_events.push(events::Event::FundingBroadcastSafe {
1798 funding_txo: funding_txo,
1799 user_channel_id: user_id,
1804 fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
1805 let mut channel_state_lock = self.channel_state.lock().unwrap();
1806 let channel_state = channel_state_lock.borrow_parts();
1807 match channel_state.by_id.get_mut(&msg.channel_id) {
1809 if chan.get_their_node_id() != *their_node_id {
1810 //TODO: here and below MsgHandleErrInternal, #153 case
1811 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1813 chan.funding_locked(&msg)
1814 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1815 if let Some(announcement_sigs) = self.get_announcement_sigs(chan) {
1816 channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
1817 node_id: their_node_id.clone(),
1818 msg: announcement_sigs,
1823 None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1827 fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
1828 let (mut dropped_htlcs, chan_option) = {
1829 let mut channel_state_lock = self.channel_state.lock().unwrap();
1830 let channel_state = channel_state_lock.borrow_parts();
1832 match channel_state.by_id.entry(msg.channel_id.clone()) {
1833 hash_map::Entry::Occupied(mut chan_entry) => {
1834 if chan_entry.get().get_their_node_id() != *their_node_id {
1835 //TODO: here and below MsgHandleErrInternal, #153 case
1836 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1838 let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1839 if let Some(msg) = shutdown {
1840 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
1841 node_id: their_node_id.clone(),
1845 if let Some(msg) = closing_signed {
1846 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
1847 node_id: their_node_id.clone(),
1851 if chan_entry.get().is_shutdown() {
1852 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1853 channel_state.short_to_id.remove(&short_id);
1855 (dropped_htlcs, Some(chan_entry.remove_entry().1))
1856 } else { (dropped_htlcs, None) }
1858 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1861 for htlc_source in dropped_htlcs.drain(..) {
1862 // unknown_next_peer: the channel is going away, so we no longer have a next peer to forward to
1863 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
1865 if let Some(chan) = chan_option {
1866 if let Ok(update) = self.get_channel_update(&chan) {
1867 let mut channel_state = self.channel_state.lock().unwrap();
1868 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1876 fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
1877 let (tx, chan_option) = {
1878 let mut channel_state_lock = self.channel_state.lock().unwrap();
1879 let channel_state = channel_state_lock.borrow_parts();
1880 match channel_state.by_id.entry(msg.channel_id.clone()) {
1881 hash_map::Entry::Occupied(mut chan_entry) => {
1882 if chan_entry.get().get_their_node_id() != *their_node_id {
1883 //TODO: here and below MsgHandleErrInternal, #153 case
1884 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1886 let (closing_signed, tx) = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1887 if let Some(msg) = closing_signed {
1888 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
1889 node_id: their_node_id.clone(),
1894 // We're done with this channel, we've got a signed closing transaction and
1895 // will send the closing_signed back to the remote peer upon return. This
1896 // also implies there are no pending HTLCs left on the channel, so we can
1897 // fully delete it from tracking (the channel monitor is still around to
1898 // watch for old state broadcasts)!
1899 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1900 channel_state.short_to_id.remove(&short_id);
1902 (tx, Some(chan_entry.remove_entry().1))
1903 } else { (tx, None) }
1905 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1908 if let Some(broadcast_tx) = tx {
1909 self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
1911 if let Some(chan) = chan_option {
1912 if let Ok(update) = self.get_channel_update(&chan) {
1913 let mut channel_state = self.channel_state.lock().unwrap();
1914 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1922 fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
1923 //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
1924 //determine the state of the payment based on our response/if we forward anything/the time
1925 //we take to respond. We should take care to avoid allowing such an attack.
1927 //TODO: There exists a further attack where a node may garble the onion data, forward it to
1928 //us repeatedly garbled in different ways, and compare our error messages, which are
1929 //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
1930 //but we should prevent it anyway.
1932 let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
1933 let channel_state = channel_state_lock.borrow_parts();
1935 match channel_state.by_id.get_mut(&msg.channel_id) {
1937 if chan.get_their_node_id() != *their_node_id {
1938 //TODO: here MsgHandleErrInternal, #153 case
1939 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1941 if !chan.is_usable() {
1942 // If the update_add is completely bogus, the call will Err and we will close,
1943 // but if we've sent a shutdown and they haven't acknowledged it yet, we just
1944 // want to reject the new HTLC and fail it backwards instead of forwarding.
1945 if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
1946 pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
1947 channel_id: msg.channel_id,
1948 htlc_id: msg.htlc_id,
1949 reason: ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &self.get_channel_update(chan).unwrap().encode_with_len()[..]),
1953 chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1955 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1959 fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
1960 let mut channel_state = self.channel_state.lock().unwrap();
1961 let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
1963 if chan.get_their_node_id() != *their_node_id {
1964 //TODO: here and below MsgHandleErrInternal, #153 case
1965 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1967 chan.update_fulfill_htlc(&msg)
1968 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone()
1970 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1972 self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone());
1976 // Process a failure we got back from upstream on a payment we sent. Returns a channel update
1977 // to pass back to the route handler (if any) and a boolean indicating whether the payment is retryable
1978 fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool) {
1979 if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
1980 macro_rules! onion_failure_log {
1981 ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => {
1982 log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value);
1984 ( $error_code_textual: expr, $error_code: expr ) => {
1985 log_trace!(self, "{}({})", $error_code_textual, $error_code);
1989 const BADONION: u16 = 0x8000;
1990 const PERM: u16 = 0x4000;
1991 const UPDATE: u16 = 0x1000;
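// The flag bits above follow BOLT 4: the high bits classify the failure (bad onion, permanent,
// includes-channel_update) and the low byte carries the specific failure code.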
1994 let mut htlc_msat = *first_hop_htlc_msat;
1996 // Handle packed channel/node updates for passing back to the route handler
1997 Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
1998 if res.is_some() { return; }
2000 let incoming_htlc_msat = htlc_msat;
2001 let amt_to_forward = htlc_msat - route_hop.fee_msat;
2002 htlc_msat = amt_to_forward;
2004 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret[..]);
2006 let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
2007 decryption_tmp.resize(packet_decrypted.len(), 0);
2008 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
2009 chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
2010 packet_decrypted = decryption_tmp;
2012 let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
2014 if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
2015 let um = ChannelManager::gen_um_from_shared_secret(&shared_secret[..]);
2016 let mut hmac = Hmac::new(Sha256::new(), &um);
2017 hmac.input(&err_packet.encode()[32..]);
2018 let mut calc_tag = [0u8; 32];
2019 hmac.raw_result(&mut calc_tag);
2021 if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
2022 if err_packet.failuremsg.len() < 2 {
2023 // Useless packet that we can't use but it passed HMAC, so it
2024 // definitely came from the peer in question
2025 res = Some((None, !is_from_final_node));
2027 let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]);
2029 match error_code & 0xff {
2031 // either from an intermediate or final node
2032 // invalid_realm(PERM|1),
2033 // temporary_node_failure(NODE|2)
2034 // permanent_node_failure(PERM|NODE|2)
2035 // required_node_feature_missing(PERM|NODE|3)
2036 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2037 node_id: route_hop.pubkey,
2038 is_permanent: error_code & PERM == PERM,
2039 }), !(error_code & PERM == PERM && is_from_final_node)));
2040 // A node returning invalid_realm is removed from the network_map even though the
2041 // NODE flag is not set. TODO: or should we only remove the channel?
2042 // The payment is retried when the removed node is not the final node.
2048 if is_from_final_node {
2049 let payment_retryable = match error_code {
2050 c if c == PERM|15 => false, // unknown_payment_hash
2051 c if c == PERM|16 => false, // incorrect_payment_amount
2052 17 => true, // final_expiry_too_soon
2053 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry
2054 let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
2057 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount
2058 let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
2062 // A final node has sent us either an invalid code or an error_code that
2063 // MUST be sent from the processing node, or the format of failuremsg
2064 // does not conform to the spec.
2065 // Remove it from the network map and don't retry the payment
2066 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2067 node_id: route_hop.pubkey,
2073 res = Some((None, payment_retryable));
2077 // now, error_code should only be from intermediate nodes
2079 _c if error_code & PERM == PERM => {
2080 res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
2081 short_channel_id: route_hop.short_channel_id,
2085 _c if error_code & UPDATE == UPDATE => {
2086 let offset = match error_code {
2087 c if c == UPDATE|7 => 0, // temporary_channel_failure
2088 c if c == UPDATE|11 => 8, // amount_below_minimum
2089 c if c == UPDATE|12 => 8, // fee_insufficient
2090 c if c == UPDATE|13 => 4, // incorrect_cltv_expiry
2091 c if c == UPDATE|14 => 0, // expiry_too_soon
2092 c if c == UPDATE|20 => 2, // channel_disabled
2094 // node sending unknown code
2095 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2096 node_id: route_hop.pubkey,
2103 if err_packet.failuremsg.len() >= offset + 2 {
2104 let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize;
2105 if err_packet.failuremsg.len() >= offset + 4 + update_len {
2106 if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) {
2107 // if channel_update should NOT have caused the failure:
2108 // MAY treat the channel_update as invalid.
2109 let is_chan_update_invalid = match error_code {
2110 c if c == UPDATE|7 => { // temporary_channel_failure
2113 c if c == UPDATE|11 => { // amount_below_minimum
2114 let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
2115 onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat);
2116 incoming_htlc_msat > chan_update.contents.htlc_minimum_msat
2118 c if c == UPDATE|12 => { // fee_insufficient
2119 let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
2120 let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
2121 onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat);
2122 new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap()
2124 c if c == UPDATE|13 => { // incorrect_cltv_expiry
2125 let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
2126 onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry);
2127 route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta
2129 c if c == UPDATE|20 => { // channel_disabled
2130 let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]);
2131 onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags);
2132 chan_update.contents.flags & 0x01 == 0x01
2134 c if c == UPDATE|21 => true, // expiry_too_far
2135 _ => { unreachable!(); },
2138 let msg = if is_chan_update_invalid { None } else {
2139 Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
2143 res = Some((msg, true));
2149 _c if error_code & BADONION == BADONION => {
2152 14 => { // expiry_too_soon
2153 res = Some((None, true));
2157 // node sending unknown code
2158 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2159 node_id: route_hop.pubkey,
2168 }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
2169 res.unwrap_or((None, true))
2170 } else { (None, true) }
2173 fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
2174 let mut channel_state = self.channel_state.lock().unwrap();
2175 match channel_state.by_id.get_mut(&msg.channel_id) {
2177 if chan.get_their_node_id() != *their_node_id {
2178 //TODO: here and below MsgHandleErrInternal, #153 case
2179 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2181 chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
2182 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
2184 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2189 fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
2190 let mut channel_state = self.channel_state.lock().unwrap();
2191 match channel_state.by_id.get_mut(&msg.channel_id) {
2193 if chan.get_their_node_id() != *their_node_id {
2194 //TODO: here and below MsgHandleErrInternal, #153 case
2195 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2197 if (msg.failure_code & 0x8000) == 0 {
2198 return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id));
2200 chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
2201 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2204 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2208 fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
2209 let mut channel_state_lock = self.channel_state.lock().unwrap();
2210 let channel_state = channel_state_lock.borrow_parts();
2211 match channel_state.by_id.get_mut(&msg.channel_id) {
2213 if chan.get_their_node_id() != *their_node_id {
2214 //TODO: here and below MsgHandleErrInternal, #153 case
2215 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2217 let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
2218 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2221 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
2222 node_id: their_node_id.clone(),
2223 msg: revoke_and_ack,
2225 if let Some(msg) = commitment_signed {
2226 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2227 node_id: their_node_id.clone(),
2228 updates: msgs::CommitmentUpdate {
2229 update_add_htlcs: Vec::new(),
2230 update_fulfill_htlcs: Vec::new(),
2231 update_fail_htlcs: Vec::new(),
2232 update_fail_malformed_htlcs: Vec::new(),
2234 commitment_signed: msg,
2238 if let Some(msg) = closing_signed {
2239 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
2240 node_id: their_node_id.clone(),
2246 None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2251 fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
2252 for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
2253 let mut forward_event = None;
2254 if !pending_forwards.is_empty() {
2255 let mut channel_state = self.channel_state.lock().unwrap();
2256 if channel_state.forward_htlcs.is_empty() {
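// This is the first HTLC queued for forwarding since the last batch went out: schedule the
// batch for a random delay of 1x-5x MIN_HTLC_RELAY_HOLDING_CELL_MILLIS from now.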
2257 forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
2258 channel_state.next_forward = forward_event.unwrap();
2260 for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
2261 match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
2262 hash_map::Entry::Occupied(mut entry) => {
2263 entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });
2265 hash_map::Entry::Vacant(entry) => {
2266 entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }));
2271 match forward_event {
2273 let mut pending_events = self.pending_events.lock().unwrap();
2274 pending_events.push(events::Event::PendingHTLCsForwardable {
2275 time_forwardable: time
2283 fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
2284 let (pending_forwards, mut pending_failures, short_channel_id) = {
2285 let mut channel_state_lock = self.channel_state.lock().unwrap();
2286 let channel_state = channel_state_lock.borrow_parts();
2287 match channel_state.by_id.get_mut(&msg.channel_id) {
2289 if chan.get_their_node_id() != *their_node_id {
2290 //TODO: here and below MsgHandleErrInternal, #153 case
2291 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2293 let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
2294 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2297 if let Some(updates) = commitment_update {
2298 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2299 node_id: their_node_id.clone(),
2303 if let Some(msg) = closing_signed {
2304 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
2305 node_id: their_node_id.clone(),
2309 (pending_forwards, pending_failures, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
2311 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2314 for failure in pending_failures.drain(..) {
2315 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
2317 self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
2322 fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
2323 let mut channel_state = self.channel_state.lock().unwrap();
2324 match channel_state.by_id.get_mut(&msg.channel_id) {
2326 if chan.get_their_node_id() != *their_node_id {
2327 //TODO: here and below MsgHandleErrInternal, #153 case
2328 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2330 chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
2332 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2336 fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
2337 let mut channel_state_lock = self.channel_state.lock().unwrap();
2338 let channel_state = channel_state_lock.borrow_parts();
2340 match channel_state.by_id.get_mut(&msg.channel_id) {
2342 if chan.get_their_node_id() != *their_node_id {
2343 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2345 if !chan.is_usable() {
2346 return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
2349 let our_node_id = self.get_our_node_id();
2350 let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
2351 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2353 let were_node_one = announcement.node_id_1 == our_node_id;
2354 let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
2355 let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id);
2356 secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action);
2357 secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action);
2359 let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
2361 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
2362 msg: msgs::ChannelAnnouncement {
2363 node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
2364 node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
2365 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
2366 bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
2367 contents: announcement,
2369 update_msg: self.get_channel_update(chan).unwrap(), // can only fail if we're not in a ready state
2372 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2377 fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
2378 let mut channel_state_lock = self.channel_state.lock().unwrap();
2379 let channel_state = channel_state_lock.borrow_parts();
2381 match channel_state.by_id.get_mut(&msg.channel_id) {
2383 if chan.get_their_node_id() != *their_node_id {
2384 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2386 let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg)
2387 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2388 if let Some(monitor) = channel_monitor {
2389 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
2393 if let Some(msg) = funding_locked {
2394 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
2395 node_id: their_node_id.clone(),
2399 macro_rules! send_raa { () => {
2400 if let Some(msg) = revoke_and_ack {
2401 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
2402 node_id: their_node_id.clone(),
2407 macro_rules! send_cu { () => {
2408 if let Some(updates) = commitment_update {
2409 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2410 node_id: their_node_id.clone(),
2416 RAACommitmentOrder::RevokeAndACKFirst => {
2420 RAACommitmentOrder::CommitmentFirst => {
2425 if let Some(msg) = shutdown {
2426 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
2427 node_id: their_node_id.clone(),
2433 None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2437 /// Begins the update_fee process. Allowed only on an outbound channel.
2438 /// If successful, will generate an UpdateHTLCs event, so you should probably poll
2439 /// PeerManager::process_events afterwards.
2440 /// Note: This API is likely to change!
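///
/// A minimal usage sketch (hedged: `channel_manager`, `channel_id` and `new_feerate_per_kw`
/// are hypothetical bindings; the channel is assumed to be outbound, live, and not awaiting
/// a monitor update):
/// ```ignore
/// // `channel_manager`, `channel_id` and `new_feerate_per_kw` are hypothetical (see above).
/// if let Err(e) = channel_manager.update_fee(channel_id, new_feerate_per_kw) {
///     // e is an APIError describing why the fee update could not be initiated.
/// }
/// ```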
2442 pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
2443 let _ = self.total_consistency_lock.read().unwrap();
2444 let mut channel_state_lock = self.channel_state.lock().unwrap();
2445 let channel_state = channel_state_lock.borrow_parts();
2447 match channel_state.by_id.get_mut(&channel_id) {
2448 None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
2450 if !chan.is_outbound() {
2451 return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
2453 if chan.is_awaiting_monitor_update() {
2454 return Err(APIError::MonitorUpdateFailed);
2456 if !chan.is_live() {
2457 return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
2459 if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? {
2460 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2463 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2464 node_id: chan.get_their_node_id(),
2465 updates: msgs::CommitmentUpdate {
2466 update_add_htlcs: Vec::new(),
2467 update_fulfill_htlcs: Vec::new(),
2468 update_fail_htlcs: Vec::new(),
2469 update_fail_malformed_htlcs: Vec::new(),
2470 update_fee: Some(update_fee),
2481 impl events::MessageSendEventsProvider for ChannelManager {
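// Returns and clears every MessageSendEvent queued by the handlers above, in the order they
// were generated; the caller (typically the peer/net layer) is responsible for delivering them.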
2482 fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
2483 let mut ret = Vec::new();
2484 let mut channel_state = self.channel_state.lock().unwrap();
2485 mem::swap(&mut ret, &mut channel_state.pending_msg_events);
2490 impl events::EventsProvider for ChannelManager {
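// Returns and clears the pending user-facing Events (e.g. PaymentReceived, PaymentSent,
// PaymentFailed, PendingHTLCsForwardable) queued by the logic above.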
2491 fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
2492 let mut ret = Vec::new();
2493 let mut pending_events = self.pending_events.lock().unwrap();
2494 mem::swap(&mut ret, &mut *pending_events);
2499 impl ChainListener for ChannelManager {
2500 fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
2501 let _ = self.total_consistency_lock.read().unwrap();
2502 let mut failed_channels = Vec::new();
2504 let mut channel_lock = self.channel_state.lock().unwrap();
2505 let channel_state = channel_lock.borrow_parts();
2506 let short_to_id = channel_state.short_to_id;
2507 let pending_msg_events = channel_state.pending_msg_events;
2508 channel_state.by_id.retain(|_, channel| {
2509 let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
2510 if let Ok(Some(funding_locked)) = chan_res {
2511 pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
2512 node_id: channel.get_their_node_id(),
2513 msg: funding_locked,
2515 if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
2516 pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
2517 node_id: channel.get_their_node_id(),
2518 msg: announcement_sigs,
2521 short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
2522 } else if let Err(e) = chan_res {
2523 pending_msg_events.push(events::MessageSendEvent::HandleError {
2524 node_id: channel.get_their_node_id(),
2527 if channel.is_shutdown() {
2531 if let Some(funding_txo) = channel.get_funding_txo() {
2532 for tx in txn_matched {
2533 for inp in tx.input.iter() {
2534 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
2535 if let Some(short_id) = channel.get_short_channel_id() {
2536 short_to_id.remove(&short_id);
2538 // It looks like our counterparty went on-chain. We go ahead and
2539 // broadcast our latest local state as well here, just in case it's
2540 // some kind of SPV attack, though we expect these to be dropped.
2541 failed_channels.push(channel.force_shutdown());
2542 if let Ok(update) = self.get_channel_update(&channel) {
2543 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2552 if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
2553 if let Some(short_id) = channel.get_short_channel_id() {
2554 short_to_id.remove(&short_id);
2556 failed_channels.push(channel.force_shutdown());
2557 // If would_broadcast_at_height() is true, the channel_monitor will broadcast
2558 // the latest local tx for us, so we should skip that here (it doesn't really
2559 // hurt anything, but does make tests a bit simpler).
2560 failed_channels.last_mut().unwrap().0 = Vec::new();
2561 if let Ok(update) = self.get_channel_update(&channel) {
2562 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2571 for failure in failed_channels.drain(..) {
2572 self.finish_force_close_channel(failure);
2574 self.latest_block_height.store(height as usize, Ordering::Release);
2575 *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
2578 /// We force-close the channel without letting our counterparty participate in the shutdown
2579 fn block_disconnected(&self, header: &BlockHeader) {
2580 let _ = self.total_consistency_lock.read().unwrap();
2581 let mut failed_channels = Vec::new();
2583 let mut channel_lock = self.channel_state.lock().unwrap();
2584 let channel_state = channel_lock.borrow_parts();
2585 let short_to_id = channel_state.short_to_id;
2586 let pending_msg_events = channel_state.pending_msg_events;
2587 channel_state.by_id.retain(|_, v| {
2588 if v.block_disconnected(header) {
2589 if let Some(short_id) = v.get_short_channel_id() {
2590 short_to_id.remove(&short_id);
2592 failed_channels.push(v.force_shutdown());
2593 if let Ok(update) = self.get_channel_update(&v) {
2594 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2604 for failure in failed_channels.drain(..) {
2605 self.finish_force_close_channel(failure);
2607 self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
2608 *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
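// Maps the Err side of a MsgHandleErrInternal result into the public handler error, first
// force-closing the relevant channel (or, for an all-zeros channel_id, treating the peer as
// disconnected with no connection possible) when the error demands it.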
2612 macro_rules! handle_error {
2613 ($self: ident, $internal: expr, $their_node_id: expr) => {
2616 Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
2617 if needs_channel_force_close {
2619 &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
2620 if msg.channel_id == [0; 32] {
2621 $self.peer_disconnected(&$their_node_id, true);
2623 $self.force_close_channel(&msg.channel_id);
2626 &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
2627 &Some(msgs::ErrorAction::IgnoreError) => {},
2628 &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
2629 if msg.channel_id == [0; 32] {
2630 $self.peer_disconnected(&$their_node_id, true);
2632 $self.force_close_channel(&msg.channel_id);
2644 impl ChannelMessageHandler for ChannelManager {
2645 //TODO: Handle errors and close channel (or so)
2646 fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
2647 let _ = self.total_consistency_lock.read().unwrap();
2648 handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
2651 fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
2652 let _ = self.total_consistency_lock.read().unwrap();
2653 handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
2656 fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
2657 let _ = self.total_consistency_lock.read().unwrap();
2658 handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
2661 fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
2662 let _ = self.total_consistency_lock.read().unwrap();
2663 handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
2666 fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
2667 let _ = self.total_consistency_lock.read().unwrap();
2668 handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
2671 fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
2672 let _ = self.total_consistency_lock.read().unwrap();
2673 handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
2676 fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
2677 let _ = self.total_consistency_lock.read().unwrap();
2678 handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
2681 fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
2682 let _ = self.total_consistency_lock.read().unwrap();
2683 handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
2686 fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
2687 let _ = self.total_consistency_lock.read().unwrap();
2688 handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
2691 fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
2692 let _ = self.total_consistency_lock.read().unwrap();
2693 handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
2696 fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
2697 let _ = self.total_consistency_lock.read().unwrap();
2698 handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
2701 fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
2702 let _ = self.total_consistency_lock.read().unwrap();
2703 handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
2706 fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
2707 let _ = self.total_consistency_lock.read().unwrap();
2708 handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
2711 fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
2712 let _ = self.total_consistency_lock.read().unwrap();
2713 handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
2716 fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
2717 let _ = self.total_consistency_lock.read().unwrap();
2718 handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
2721 fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
2722 let _ = self.total_consistency_lock.read().unwrap();
2723 handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
2726 fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
2727 let _ = self.total_consistency_lock.read().unwrap();
2728 let mut failed_channels = Vec::new();
2729 let mut failed_payments = Vec::new();
2731 let mut channel_state_lock = self.channel_state.lock().unwrap();
2732 let channel_state = channel_state_lock.borrow_parts();
2733 let short_to_id = channel_state.short_to_id;
2734 let pending_msg_events = channel_state.pending_msg_events;
2735 if no_connection_possible {
2736 log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id));
2737 channel_state.by_id.retain(|_, chan| {
2738 if chan.get_their_node_id() == *their_node_id {
2739 if let Some(short_id) = chan.get_short_channel_id() {
2740 short_to_id.remove(&short_id);
2742 failed_channels.push(chan.force_shutdown());
2743 if let Ok(update) = self.get_channel_update(&chan) {
2744 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2754 log_debug!(self, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id));
2755 channel_state.by_id.retain(|_, chan| {
2756 if chan.get_their_node_id() == *their_node_id {
2757 //TODO: mark channel disabled (and maybe announce such after a timeout).
2758 let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused();
2759 if !failed_adds.is_empty() {
2760 let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
2761 failed_payments.push((chan_update, failed_adds));
2763 if chan.is_shutdown() {
2764 if let Some(short_id) = chan.get_short_channel_id() {
2765 short_to_id.remove(&short_id);
2774 for failure in failed_channels.drain(..) {
2775 self.finish_force_close_channel(failure);
2777 for (chan_update, mut htlc_sources) in failed_payments {
2778 for (htlc_source, payment_hash) in htlc_sources.drain(..) {
2779 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
2784 fn peer_connected(&self, their_node_id: &PublicKey) {
2785 log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
2787 let _ = self.total_consistency_lock.read().unwrap();
2788 let mut channel_state_lock = self.channel_state.lock().unwrap();
2789 let channel_state = channel_state_lock.borrow_parts();
2790 let pending_msg_events = channel_state.pending_msg_events;
2791 channel_state.by_id.retain(|_, chan| {
2792 if chan.get_their_node_id() == *their_node_id {
2793 if !chan.have_received_message() {
2794 // If we created this (outbound) channel while we were disconnected from the
2795 // peer we probably failed to send the open_channel message, which is now
2796 // lost. We can't have had anything pending related to this channel, so we just
2800 pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
2801 node_id: chan.get_their_node_id(),
2802 msg: chan.get_channel_reestablish(),
2808 //TODO: Also re-broadcast announcement_signatures
2811 fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
2812 let _ = self.total_consistency_lock.read().unwrap();
2814 if msg.channel_id == [0; 32] {
2815 for chan in self.list_channels() {
2816 if chan.remote_network_id == *their_node_id {
2817 self.force_close_channel(&chan.channel_id);
2821 self.force_close_channel(&msg.channel_id);
2826 const SERIALIZATION_VERSION: u8 = 1;
2827 const MIN_SERIALIZATION_VERSION: u8 = 1;
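// Version bytes written at the start of a serialized ChannelManager: SERIALIZATION_VERSION is
// the format version we write; MIN_SERIALIZATION_VERSION is the minimum version a reader must
// support in order to understand what we wrote.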
2829 impl Writeable for PendingForwardHTLCInfo {
2830 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2831 if let &Some(ref onion) = &self.onion_packet {
2833 onion.write(writer)?;
2837 self.incoming_shared_secret.write(writer)?;
2838 self.payment_hash.write(writer)?;
2839 self.short_channel_id.write(writer)?;
2840 self.amt_to_forward.write(writer)?;
2841 self.outgoing_cltv_value.write(writer)?;
2846 impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
2847 fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
2848 let onion_packet = match <u8 as Readable<R>>::read(reader)? {
2850 1 => Some(msgs::OnionPacket::read(reader)?),
2851 _ => return Err(DecodeError::InvalidValue),
2853 Ok(PendingForwardHTLCInfo {
2855 incoming_shared_secret: Readable::read(reader)?,
2856 payment_hash: Readable::read(reader)?,
2857 short_channel_id: Readable::read(reader)?,
2858 amt_to_forward: Readable::read(reader)?,
2859 outgoing_cltv_value: Readable::read(reader)?,
2864 impl Writeable for HTLCFailureMsg {
2865 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2867 &HTLCFailureMsg::Relay(ref fail_msg) => {
2869 fail_msg.write(writer)?;
2871 &HTLCFailureMsg::Malformed(ref fail_msg) => {
2873 fail_msg.write(writer)?;
2880 impl<R: ::std::io::Read> Readable<R> for HTLCFailureMsg {
2881 fn read(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
2882 match <u8 as Readable<R>>::read(reader)? {
2883 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
2884 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
2885 _ => Err(DecodeError::InvalidValue),
2890 impl Writeable for PendingHTLCStatus {
2891 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2893 &PendingHTLCStatus::Forward(ref forward_info) => {
2895 forward_info.write(writer)?;
2897 &PendingHTLCStatus::Fail(ref fail_msg) => {
2899 fail_msg.write(writer)?;
2906 impl<R: ::std::io::Read> Readable<R> for PendingHTLCStatus {
2907 fn read(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
2908 match <u8 as Readable<R>>::read(reader)? {
2909 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
2910 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
2911 _ => Err(DecodeError::InvalidValue),
2916 impl_writeable!(HTLCPreviousHopData, 0, {
2919 incoming_packet_shared_secret
2922 impl Writeable for HTLCSource {
2923 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2925 &HTLCSource::PreviousHopData(ref hop_data) => {
2927 hop_data.write(writer)?;
2929 &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
2931 route.write(writer)?;
2932 session_priv.write(writer)?;
2933 first_hop_htlc_msat.write(writer)?;
2940 impl<R: ::std::io::Read> Readable<R> for HTLCSource {
2941 fn read(reader: &mut R) -> Result<HTLCSource, DecodeError> {
2942 match <u8 as Readable<R>>::read(reader)? {
2943 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
2944 1 => Ok(HTLCSource::OutboundRoute {
2945 route: Readable::read(reader)?,
2946 session_priv: Readable::read(reader)?,
2947 first_hop_htlc_msat: Readable::read(reader)?,
2949 _ => Err(DecodeError::InvalidValue),
2954 impl Writeable for HTLCFailReason {
2955 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2957 &HTLCFailReason::ErrorPacket { ref err } => {
2961 &HTLCFailReason::Reason { ref failure_code, ref data } => {
2963 failure_code.write(writer)?;
2964 data.write(writer)?;
2971 impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
2972 fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
2973 match <u8 as Readable<R>>::read(reader)? {
2974 0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? }),
2975 1 => Ok(HTLCFailReason::Reason {
2976 failure_code: Readable::read(reader)?,
2977 data: Readable::read(reader)?,
2979 _ => Err(DecodeError::InvalidValue),
2984 impl_writeable!(HTLCForwardInfo, 0, {
2985 prev_short_channel_id,
2990 impl Writeable for ChannelManager {
2991 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2992 let _ = self.total_consistency_lock.write().unwrap();
2994 writer.write_all(&[SERIALIZATION_VERSION; 1])?;
2995 writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
2997 self.genesis_hash.write(writer)?;
2998 (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
2999 self.last_block_hash.lock().unwrap().write(writer)?;
3001 let channel_state = self.channel_state.lock().unwrap();
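// Channels for which funding has not yet been initiated hold no state worth persisting, so
// count them here and skip them when writing channels out below.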
3002 let mut unfunded_channels = 0;
3003 for (_, channel) in channel_state.by_id.iter() {
3004 if !channel.is_funding_initiated() {
3005 unfunded_channels += 1;
3008 ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
3009 for (_, channel) in channel_state.by_id.iter() {
3010 if channel.is_funding_initiated() {
3011 channel.write(writer)?;
3015 (channel_state.forward_htlcs.len() as u64).write(writer)?;
3016 for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
3017 short_channel_id.write(writer)?;
3018 (pending_forwards.len() as u64).write(writer)?;
3019 for forward in pending_forwards {
3020 forward.write(writer)?;
3024 (channel_state.claimable_htlcs.len() as u64).write(writer)?;
3025 for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
3026 payment_hash.write(writer)?;
3027 (previous_hops.len() as u64).write(writer)?;
3028 for previous_hop in previous_hops {
3029 previous_hop.write(writer)?;
3037 /// Arguments for the creation of a ChannelManager that are not deserialized.
3039 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation is as follows (steps 1 and 2 are sketched in the example below):
3041 /// 1) Deserialize all stored ChannelMonitors.
3042 /// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
3043 /// ChannelManager)>::read(reader, args).
3044 /// This may result in closing some Channels, to ensure no loss of funds, if the ChannelMonitor is
3045 /// newer than the stored ChannelManager state. Thus, transactions may be broadcast.
3046 /// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
3047 /// ChannelMonitor::get_monitored_outpoints and ChannelMonitor::get_funding_txo().
3048 /// 4) Reconnect blocks on your ChannelMonitors.
3049 /// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
3050 /// 6) Disconnect/connect blocks on the ChannelManager.
3051 /// 7) Register the new ChannelManager with your ChainWatchInterface (this does not happen
3052 /// automatically as it does in ChannelManager::new()).
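///
/// A rough sketch of steps (1) and (2), assuming the `monitors` map, `reader`, and the
/// Arc-wrapped interface objects all come from your own storage and setup (this is not a
/// complete, compiling example):
///
/// ```ignore
/// // Step (1): `monitors` is the HashMap<OutPoint, ChannelMonitor> rebuilt from your own
/// // storage, keyed by each monitor's funding outpoint.
/// let monitor_refs: HashMap<OutPoint, &ChannelMonitor> =
///     monitors.iter().map(|(outpoint, monitor)| (outpoint.clone(), monitor)).collect();
///
/// // Step (2): fill in this struct and deserialize the ChannelManager itself.
/// let read_args = ChannelManagerReadArgs {
///     keys_manager, fee_estimator, monitor, chain_monitor, tx_broadcaster, logger,
///     default_config: UserConfig::new(),
///     channel_monitors: &monitor_refs,
/// };
/// let (last_block_hash, channel_manager) =
///     <(Sha256dHash, ChannelManager)>::read(&mut reader, read_args)?;
/// ```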
3053 pub struct ChannelManagerReadArgs<'a> {
3054 /// The keys provider which will give us relevant keys. Some keys will be loaded during
3055 /// deserialization.
3056 pub keys_manager: Arc<KeysInterface>,
3058 /// The fee_estimator for use in the ChannelManager in the future.
3060 /// No calls to the FeeEstimator will be made during deserialization.
3061 pub fee_estimator: Arc<FeeEstimator>,
3062 /// The ManyChannelMonitor for use in the ChannelManager in the future.
3064 /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
3065 /// you have deserialized ChannelMonitors separately and will add them to your
3066 /// ManyChannelMonitor after deserializing this ChannelManager.
3067 pub monitor: Arc<ManyChannelMonitor>,
3068 /// The ChainWatchInterface for use in the ChannelManager in the future.
3070 /// No calls to the ChainWatchInterface will be made during deserialization.
3071 pub chain_monitor: Arc<ChainWatchInterface>,
3072 /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
3073 /// used to broadcast the latest local commitment transactions of channels which must be
3074 /// force-closed during deserialization.
3075 pub tx_broadcaster: Arc<BroadcasterInterface>,
3076 /// The Logger for use in the ChannelManager and which may be used to log information during
3077 /// deserialization.
3078 pub logger: Arc<Logger>,
3079 /// Default settings used for new channels. Any existing channels will continue to use the
3080 /// runtime settings which were stored when the ChannelManager was serialized.
3081 pub default_config: UserConfig,
3083 /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
3084 /// value.get_funding_txo() should be the key).
3086 /// If a monitor is inconsistent with the channel state during deserialization, the channel will
3087 /// be force-closed using the data in the ChannelMonitor and the Channel will be dropped. The same
3088 /// applies to monitors for which no corresponding channel data is found. If channel data is found
3089 /// for which the corresponding monitor is missing, Err(DecodeError::InvalidValue) will be returned.
3091 /// In such cases the latest local transactions will be sent to the tx_broadcaster included in this struct.
3093 pub channel_monitors: &'a HashMap<OutPoint, &'a ChannelMonitor>,
3096 impl<'a, R : ::std::io::Read> ReadableArgs<R, ChannelManagerReadArgs<'a>> for (Sha256dHash, ChannelManager) {
3097 fn read(reader: &mut R, args: ChannelManagerReadArgs<'a>) -> Result<Self, DecodeError> {
3098 let _ver: u8 = Readable::read(reader)?;
3099 let min_ver: u8 = Readable::read(reader)?;
3100 if min_ver > SERIALIZATION_VERSION {
3101 return Err(DecodeError::UnknownVersion);
3104 let genesis_hash: Sha256dHash = Readable::read(reader)?;
3105 let latest_block_height: u32 = Readable::read(reader)?;
3106 let last_block_hash: Sha256dHash = Readable::read(reader)?;
3108 let mut closed_channels = Vec::new();
3110 let channel_count: u64 = Readable::read(reader)?;
3111 let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
3112 let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
3113 let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
3114 for _ in 0..channel_count {
3115 let mut channel: Channel = ReadableArgs::read(reader, args.logger.clone())?;
3116 if channel.last_block_connected != last_block_hash {
3117 return Err(DecodeError::InvalidValue);
3120 let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
3121 funding_txo_set.insert(funding_txo.clone());
3122 if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
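// If the Channel's view of the current commitment/revocation state is out of sync with its
// monitor, the serialized Channel is stale: force-close it and broadcast the monitor's
// latest local commitment transaction instead of the Channel's.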
3123 if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
3124 channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
3125 channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
3126 let mut force_close_res = channel.force_shutdown();
3127 force_close_res.0 = monitor.get_latest_local_commitment_txn();
3128 closed_channels.push(force_close_res);
3130 if let Some(short_channel_id) = channel.get_short_channel_id() {
3131 short_to_id.insert(short_channel_id, channel.channel_id());
3133 by_id.insert(channel.channel_id(), channel);
3136 return Err(DecodeError::InvalidValue);
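// Any monitor for which we found no corresponding channel data is treated as a force-closed
// channel: queue its latest local commitment transaction for broadcast below.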
3140 for (ref funding_txo, ref monitor) in args.channel_monitors.iter() {
3141 if !funding_txo_set.contains(funding_txo) {
3142 closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
3146 let forward_htlcs_count: u64 = Readable::read(reader)?;
3147 let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
3148 for _ in 0..forward_htlcs_count {
3149 let short_channel_id = Readable::read(reader)?;
3150 let pending_forwards_count: u64 = Readable::read(reader)?;
3151 let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, 128));
3152 for _ in 0..pending_forwards_count {
3153 pending_forwards.push(Readable::read(reader)?);
3155 forward_htlcs.insert(short_channel_id, pending_forwards);
3158 let claimable_htlcs_count: u64 = Readable::read(reader)?;
3159 let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
3160 for _ in 0..claimable_htlcs_count {
3161 let payment_hash = Readable::read(reader)?;
3162 let previous_hops_len: u64 = Readable::read(reader)?;
3163 let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
3164 for _ in 0..previous_hops_len {
3165 previous_hops.push(Readable::read(reader)?);
3167 claimable_htlcs.insert(payment_hash, previous_hops);
3170 let channel_manager = ChannelManager {
3172 fee_estimator: args.fee_estimator,
3173 monitor: args.monitor,
3174 chain_monitor: args.chain_monitor,
3175 tx_broadcaster: args.tx_broadcaster,
3177 latest_block_height: AtomicUsize::new(latest_block_height as usize),
3178 last_block_hash: Mutex::new(last_block_hash),
3179 secp_ctx: Secp256k1::new(),
3181 channel_state: Mutex::new(ChannelHolder {
3184 next_forward: Instant::now(),
3187 pending_msg_events: Vec::new(),
3189 our_network_key: args.keys_manager.get_node_secret(),
3191 pending_events: Mutex::new(Vec::new()),
3192 total_consistency_lock: RwLock::new(()),
3193 keys_manager: args.keys_manager,
3194 logger: args.logger,
3195 default_configuration: args.default_config,
3198 for close_res in closed_channels.drain(..) {
3199 channel_manager.finish_force_close_channel(close_res);
3200 //TODO: Broadcast channel update for closed channels, but only after we've made a
3201 //connection or two.
3204 Ok((last_block_hash.clone(), channel_manager))
3210 use chain::chaininterface;
3211 use chain::transaction::OutPoint;
3212 use chain::chaininterface::{ChainListener, ChainWatchInterface};
3213 use chain::keysinterface::KeysInterface;
3214 use chain::keysinterface;
3215 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
3216 use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder};
3217 use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor};
3218 use ln::router::{Route, RouteHop, Router};
3220 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
3221 use util::test_utils;
3222 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
3223 use util::errors::APIError;
3224 use util::logger::Logger;
3225 use util::ser::{Writeable, Writer, ReadableArgs};
3226 use util::config::UserConfig;
3228 use bitcoin::util::hash::Sha256dHash;
3229 use bitcoin::blockdata::block::{Block, BlockHeader};
3230 use bitcoin::blockdata::transaction::{Transaction, TxOut};
3231 use bitcoin::blockdata::constants::genesis_block;
3232 use bitcoin::network::constants::Network;
3233 use bitcoin::network::serialize::serialize;
3234 use bitcoin::network::serialize::BitcoinHash;
3238 use secp256k1::{Secp256k1, Message};
3239 use secp256k1::key::{PublicKey,SecretKey};
3241 use crypto::sha2::Sha256;
3242 use crypto::digest::Digest;
3244 use rand::{thread_rng,Rng};
3246 use std::cell::RefCell;
3247 use std::collections::{BTreeSet, HashMap};
3248 use std::default::Default;
3250 use std::sync::{Arc, Mutex};
3251 use std::sync::atomic::Ordering;
3252 use std::time::Instant;
3255 fn build_test_onion_keys() -> Vec<OnionKeys> {
3256 // Keys from BOLT 4, used in both test vector tests
3257 let secp_ctx = Secp256k1::new();
3262 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
3263 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
3266 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
3267 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
3270 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
3271 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
3274 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]).unwrap(),
3275 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
3278 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()[..]).unwrap(),
3279 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop; we fill in payloads manually
3284 let session_priv = SecretKey::from_slice(&secp_ctx, &hex::decode("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap();
3286 let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
3287 assert_eq!(onion_keys.len(), route.hops.len());
3292 fn onion_vectors() {
3293 // Packet creation test vectors from BOLT 4
3294 let onion_keys = build_test_onion_keys();
3296 assert_eq!(onion_keys[0].shared_secret[..], hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
3297 assert_eq!(onion_keys[0].blinding_factor[..], hex::decode("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
3298 assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
3299 assert_eq!(onion_keys[0].rho, hex::decode("ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986").unwrap()[..]);
3300 assert_eq!(onion_keys[0].mu, hex::decode("b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba").unwrap()[..]);
3302 assert_eq!(onion_keys[1].shared_secret[..], hex::decode("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
3303 assert_eq!(onion_keys[1].blinding_factor[..], hex::decode("bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f").unwrap()[..]);
3304 assert_eq!(onion_keys[1].ephemeral_pubkey.serialize()[..], hex::decode("028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2").unwrap()[..]);
3305 assert_eq!(onion_keys[1].rho, hex::decode("450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59").unwrap()[..]);
3306 assert_eq!(onion_keys[1].mu, hex::decode("05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9").unwrap()[..]);
3308 assert_eq!(onion_keys[2].shared_secret[..], hex::decode("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
3309 assert_eq!(onion_keys[2].blinding_factor[..], hex::decode("a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5").unwrap()[..]);
3310 assert_eq!(onion_keys[2].ephemeral_pubkey.serialize()[..], hex::decode("03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0").unwrap()[..]);
3311 assert_eq!(onion_keys[2].rho, hex::decode("11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea").unwrap()[..]);
3312 assert_eq!(onion_keys[2].mu, hex::decode("caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78").unwrap()[..]);
3314 assert_eq!(onion_keys[3].shared_secret[..], hex::decode("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
3315 assert_eq!(onion_keys[3].blinding_factor[..], hex::decode("7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262").unwrap()[..]);
3316 assert_eq!(onion_keys[3].ephemeral_pubkey.serialize()[..], hex::decode("031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595").unwrap()[..]);
3317 assert_eq!(onion_keys[3].rho, hex::decode("cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e").unwrap()[..]);
3318 assert_eq!(onion_keys[3].mu, hex::decode("5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9").unwrap()[..]);
3320 assert_eq!(onion_keys[4].shared_secret[..], hex::decode("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
3321 assert_eq!(onion_keys[4].blinding_factor[..], hex::decode("c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205").unwrap()[..]);
3322 assert_eq!(onion_keys[4].ephemeral_pubkey.serialize()[..], hex::decode("03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4").unwrap()[..]);
3323 assert_eq!(onion_keys[4].rho, hex::decode("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
3324 assert_eq!(onion_keys[4].mu, hex::decode("8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a").unwrap()[..]);
3326 // Test vectors below are flat-out wrong: they claim to set outgoing_cltv_value to non-0 :/
3327 let payloads = vec!(
3328 msgs::OnionHopData {
3330 data: msgs::OnionRealm0HopData {
3331 short_channel_id: 0,
3333 outgoing_cltv_value: 0,
3337 msgs::OnionHopData {
3339 data: msgs::OnionRealm0HopData {
3340 short_channel_id: 0x0101010101010101,
3341 amt_to_forward: 0x0100000001,
3342 outgoing_cltv_value: 0,
3346 msgs::OnionHopData {
3348 data: msgs::OnionRealm0HopData {
3349 short_channel_id: 0x0202020202020202,
3350 amt_to_forward: 0x0200000002,
3351 outgoing_cltv_value: 0,
3355 msgs::OnionHopData {
3357 data: msgs::OnionRealm0HopData {
3358 short_channel_id: 0x0303030303030303,
3359 amt_to_forward: 0x0300000003,
3360 outgoing_cltv_value: 0,
3364 msgs::OnionHopData {
3366 data: msgs::OnionRealm0HopData {
3367 short_channel_id: 0x0404040404040404,
3368 amt_to_forward: 0x0400000004,
3369 outgoing_cltv_value: 0,
3375 let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]);
3376 // Just check the final packet encoding, as it includes all the per-hop vectors in it
3378 assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
3382 fn test_failure_packet_onion() {
3383 // Returning Errors test vectors from BOLT 4
3385 let onion_keys = build_test_onion_keys();
3386 let onion_error = ChannelManager::build_failure_packet(&onion_keys[4].shared_secret[..], 0x2002, &[0; 0]);
3387 assert_eq!(onion_error.encode(), hex::decode("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap());
3389 let onion_packet_1 = ChannelManager::encrypt_failure_packet(&onion_keys[4].shared_secret[..], &onion_error.encode()[..]);
3390 assert_eq!(onion_packet_1.data, hex::decode("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap());
3392 let onion_packet_2 = ChannelManager::encrypt_failure_packet(&onion_keys[3].shared_secret[..], &onion_packet_1.data[..]);
3393 assert_eq!(onion_packet_2.data, hex::decode("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap());
3395 let onion_packet_3 = ChannelManager::encrypt_failure_packet(&onion_keys[2].shared_secret[..], &onion_packet_2.data[..]);
3396 assert_eq!(onion_packet_3.data, hex::decode("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap());
3398 let onion_packet_4 = ChannelManager::encrypt_failure_packet(&onion_keys[1].shared_secret[..], &onion_packet_3.data[..]);
3399 assert_eq!(onion_packet_4.data, hex::decode("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap());
3401 let onion_packet_5 = ChannelManager::encrypt_failure_packet(&onion_keys[0].shared_secret[..], &onion_packet_4.data[..]);
3402 assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
3405 fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
3406 assert!(chain.does_match_tx(tx));
3407 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3408 chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
3410 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3411 chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
3416 chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
3417 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
3418 chan_monitor: Arc<test_utils::TestChannelMonitor>,
3419 node: Arc<ChannelManager>,
3421 node_seed: [u8; 32],
3422 network_payment_count: Rc<RefCell<u8>>,
3423 network_chan_count: Rc<RefCell<u32>>,
3425 impl Drop for Node {
3426 fn drop(&mut self) {
3427 if !::std::thread::panicking() {
3428 // Check that we processed all pending events
3429 assert_eq!(self.node.get_and_clear_pending_msg_events().len(), 0);
3430 assert_eq!(self.node.get_and_clear_pending_events().len(), 0);
3431 assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0);
3436 fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3437 create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
3440 fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3441 let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
3442 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
3443 (announcement, as_update, bs_update, channel_id, tx)
3446 macro_rules! get_revoke_commit_msgs {
3447 ($node: expr, $node_id: expr) => {
3449 let events = $node.node.get_and_clear_pending_msg_events();
3450 assert_eq!(events.len(), 2);
3452 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
3453 assert_eq!(*node_id, $node_id);
3456 _ => panic!("Unexpected event"),
3457 }, match events[1] {
3458 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3459 assert_eq!(*node_id, $node_id);
3460 assert!(updates.update_add_htlcs.is_empty());
3461 assert!(updates.update_fulfill_htlcs.is_empty());
3462 assert!(updates.update_fail_htlcs.is_empty());
3463 assert!(updates.update_fail_malformed_htlcs.is_empty());
3464 assert!(updates.update_fee.is_none());
3465 updates.commitment_signed.clone()
3467 _ => panic!("Unexpected event"),
3473 macro_rules! get_event_msg {
3474 ($node: expr, $event_type: path, $node_id: expr) => {
3476 let events = $node.node.get_and_clear_pending_msg_events();
3477 assert_eq!(events.len(), 1);
3479 $event_type { ref node_id, ref msg } => {
3480 assert_eq!(*node_id, $node_id);
3483 _ => panic!("Unexpected event"),
3489 macro_rules! get_htlc_update_msgs {
3490 ($node: expr, $node_id: expr) => {
3492 let events = $node.node.get_and_clear_pending_msg_events();
3493 assert_eq!(events.len(), 1);
3495 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3496 assert_eq!(*node_id, $node_id);
3499 _ => panic!("Unexpected event"),
3505 macro_rules! get_feerate {
3506 ($node: expr, $channel_id: expr) => {
3508 let chan_lock = $node.node.channel_state.lock().unwrap();
3509 let chan = chan_lock.by_id.get(&$channel_id).unwrap();
3516 fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction {
3517 node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
3518 node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
3519 node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
3521 let chan_id = *node_a.network_chan_count.borrow();
3525 let events_2 = node_a.node.get_and_clear_pending_events();
3526 assert_eq!(events_2.len(), 1);
3528 Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
3529 assert_eq!(*channel_value_satoshis, channel_value);
3530 assert_eq!(user_channel_id, 42);
3532 tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
3533 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
3535 funding_output = OutPoint::new(Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0);
3537 node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
3538 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
3539 assert_eq!(added_monitors.len(), 1);
3540 assert_eq!(added_monitors[0].0, funding_output);
3541 added_monitors.clear();
3543 _ => panic!("Unexpected event"),
3546 node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap();
3548 let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
3549 assert_eq!(added_monitors.len(), 1);
3550 assert_eq!(added_monitors[0].0, funding_output);
3551 added_monitors.clear();
3554 node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())).unwrap();
3556 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
3557 assert_eq!(added_monitors.len(), 1);
3558 assert_eq!(added_monitors[0].0, funding_output);
3559 added_monitors.clear();
3562 let events_4 = node_a.node.get_and_clear_pending_events();
3563 assert_eq!(events_4.len(), 1);
3565 Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
3566 assert_eq!(user_channel_id, 42);
3567 assert_eq!(*funding_txo, funding_output);
3569 _ => panic!("Unexpected event"),
3575 fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
3576 confirm_transaction(&node_b.chain_monitor, &tx, tx.version);
3577 node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingLocked, node_a.node.get_our_node_id())).unwrap();
3581 confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
3582 let events_6 = node_a.node.get_and_clear_pending_msg_events();
3583 assert_eq!(events_6.len(), 2);
3584 ((match events_6[0] {
3585 MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
3586 channel_id = msg.channel_id.clone();
3587 assert_eq!(*node_id, node_b.node.get_our_node_id());
3590 _ => panic!("Unexpected event"),
3591 }, match events_6[1] {
3592 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
3593 assert_eq!(*node_id, node_b.node.get_our_node_id());
3596 _ => panic!("Unexpected event"),
3600 fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
3601 let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
3602 let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
3606 fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
3607 node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap();
3608 let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
3609 node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
3611 let events_7 = node_b.node.get_and_clear_pending_msg_events();
3612 assert_eq!(events_7.len(), 1);
3613 let (announcement, bs_update) = match events_7[0] {
3614 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
3617 _ => panic!("Unexpected event"),
3620 node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
3621 let events_8 = node_a.node.get_and_clear_pending_msg_events();
3622 assert_eq!(events_8.len(), 1);
3623 let as_update = match events_8[0] {
3624 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
3625 assert!(*announcement == *msg);
3628 _ => panic!("Unexpected event"),
3631 *node_a.network_chan_count.borrow_mut() += 1;
3633 ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
3636 fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3637 create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
3640 fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3641 let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
3643 assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
3644 node.router.handle_channel_update(&chan_announcement.1).unwrap();
3645 node.router.handle_channel_update(&chan_announcement.2).unwrap();
3647 (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
3650 macro_rules! check_spends {
3651 ($tx: expr, $spends_tx: expr) => {
3653 let mut funding_tx_map = HashMap::new();
3654 let spends_tx = $spends_tx;
3655 funding_tx_map.insert(spends_tx.txid(), spends_tx);
3656 $tx.verify(&funding_tx_map).unwrap();
3661 macro_rules! get_closing_signed_broadcast {
3662 ($node: expr, $dest_pubkey: expr) => {
3664 let events = $node.get_and_clear_pending_msg_events();
3665 assert!(events.len() == 1 || events.len() == 2);
3666 (match events[events.len() - 1] {
3667 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
3668 assert_eq!(msg.contents.flags & 2, 2);
3671 _ => panic!("Unexpected event"),
3672 }, if events.len() == 2 {
3674 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
3675 assert_eq!(*node_id, $dest_pubkey);
3678 _ => panic!("Unexpected event"),
3685 fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
3686 let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
3687 let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
3690 node_a.close_channel(channel_id).unwrap();
3691 node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id())).unwrap();
3693 let events_1 = node_b.get_and_clear_pending_msg_events();
3694 assert!(events_1.len() >= 1);
3695 let shutdown_b = match events_1[0] {
3696 MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
3697 assert_eq!(node_id, &node_a.get_our_node_id());
3700 _ => panic!("Unexpected event"),
3703 let closing_signed_b = if !close_inbound_first {
3704 assert_eq!(events_1.len(), 1);
3707 Some(match events_1[1] {
3708 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
3709 assert_eq!(node_id, &node_a.get_our_node_id());
3712 _ => panic!("Unexpected event"),
3716 node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
3717 let (as_update, bs_update) = if close_inbound_first {
3718 assert!(node_a.get_and_clear_pending_msg_events().is_empty());
3719 node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
3720 assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
3721 tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
3722 let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
3724 node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
3725 let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
3726 assert!(none_b.is_none());
3727 assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
3728 tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
3729 (as_update, bs_update)
3731 let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
3733 node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a).unwrap();
3734 assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
3735 tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
3736 let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
3738 node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
3739 let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
3740 assert!(none_a.is_none());
3741 assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
3742 tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
3743 (as_update, bs_update)
3745 assert_eq!(tx_a, tx_b);
3746 check_spends!(tx_a, funding_tx);
3748 (as_update, bs_update)
3753 msgs: Vec<msgs::UpdateAddHTLC>,
3754 commitment_msg: msgs::CommitmentSigned,
3757 fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
3758 assert!(updates.update_fulfill_htlcs.is_empty());
3759 assert!(updates.update_fail_htlcs.is_empty());
3760 assert!(updates.update_fail_malformed_htlcs.is_empty());
3761 assert!(updates.update_fee.is_none());
3762 SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
3765 fn from_event(event: MessageSendEvent) -> SendEvent {
3767 MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
3768 _ => panic!("Unexpected event type!"),
3773 macro_rules! check_added_monitors {
3774 ($node: expr, $count: expr) => {
3776 let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
3777 assert_eq!(added_monitors.len(), $count);
3778 added_monitors.clear();
3783 macro_rules! commitment_signed_dance {
3784 ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
3786 check_added_monitors!($node_a, 0);
3787 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
3788 $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
3789 check_added_monitors!($node_a, 1);
3790 commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
3793 ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
3795 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
3796 check_added_monitors!($node_b, 0);
3797 assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
3798 $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
3799 assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
3800 check_added_monitors!($node_b, 1);
3801 $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap();
3802 let (bs_revoke_and_ack, extra_msg_option) = {
3803 let events = $node_b.node.get_and_clear_pending_msg_events();
3804 assert!(events.len() <= 2);
3806 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
3807 assert_eq!(*node_id, $node_a.node.get_our_node_id());
3810 _ => panic!("Unexpected event"),
3811 }, events.get(1).map(|e| e.clone()))
3813 check_added_monitors!($node_b, 1);
3814 if $fail_backwards {
3815 assert!($node_a.node.get_and_clear_pending_events().is_empty());
3816 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
3818 $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
3820 let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
3821 if $fail_backwards {
3822 assert_eq!(added_monitors.len(), 2);
3823 assert!(added_monitors[0].0 != added_monitors[1].0);
3825 assert_eq!(added_monitors.len(), 1);
3827 added_monitors.clear();
3832 ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
3834 assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
3837 ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
3839 commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
3840 if $fail_backwards {
3841 let channel_state = $node_a.node.channel_state.lock().unwrap();
3842 assert_eq!(channel_state.pending_msg_events.len(), 1);
3843 if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
3844 assert_ne!(*node_id, $node_b.node.get_our_node_id());
3845 } else { panic!("Unexpected event"); }
3847 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
3853 macro_rules! get_payment_preimage_hash {
3856 let payment_preimage = [*$node.network_payment_count.borrow(); 32];
3857 *$node.network_payment_count.borrow_mut() += 1;
3858 let mut payment_hash = [0; 32];
3859 let mut sha = Sha256::new();
3860 sha.input(&payment_preimage[..]);
3861 sha.result(&mut payment_hash);
3862 (payment_preimage, payment_hash)
3867 fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
3868 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
3870 let mut payment_event = {
3871 origin_node.node.send_payment(route, our_payment_hash).unwrap();
3872 check_added_monitors!(origin_node, 1);
3874 let mut events = origin_node.node.get_and_clear_pending_msg_events();
3875 assert_eq!(events.len(), 1);
3876 SendEvent::from_event(events.remove(0))
3878 let mut prev_node = origin_node;
3880 for (idx, &node) in expected_route.iter().enumerate() {
3881 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
3883 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3884 check_added_monitors!(node, 0);
3885 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
3887 let events_1 = node.node.get_and_clear_pending_events();
3888 assert_eq!(events_1.len(), 1);
3890 Event::PendingHTLCsForwardable { .. } => { },
3891 _ => panic!("Unexpected event"),
3894 node.node.channel_state.lock().unwrap().next_forward = Instant::now();
3895 node.node.process_pending_htlc_forwards();
3897 if idx == expected_route.len() - 1 {
3898 let events_2 = node.node.get_and_clear_pending_events();
3899 assert_eq!(events_2.len(), 1);
3901 Event::PaymentReceived { ref payment_hash, amt } => {
3902 assert_eq!(our_payment_hash, *payment_hash);
3903 assert_eq!(amt, recv_value);
3905 _ => panic!("Unexpected event"),
3908 let mut events_2 = node.node.get_and_clear_pending_msg_events();
3909 assert_eq!(events_2.len(), 1);
3910 check_added_monitors!(node, 1);
3911 payment_event = SendEvent::from_event(events_2.remove(0));
3912 assert_eq!(payment_event.msgs.len(), 1);
3918 (our_payment_preimage, our_payment_hash)
3921 fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) {
3922 assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
3923 check_added_monitors!(expected_route.last().unwrap(), 1);
3925 let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
3926 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
3927 macro_rules! get_next_msgs {
3930 let events = $node.node.get_and_clear_pending_msg_events();
3931 assert_eq!(events.len(), 1);
3933 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
3934 assert!(update_add_htlcs.is_empty());
3935 assert_eq!(update_fulfill_htlcs.len(), 1);
3936 assert!(update_fail_htlcs.is_empty());
3937 assert!(update_fail_malformed_htlcs.is_empty());
3938 assert!(update_fee.is_none());
3939 expected_next_node = node_id.clone();
3940 Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()))
3942 _ => panic!("Unexpected event"),
3948 macro_rules! last_update_fulfill_dance {
3949 ($node: expr, $prev_node: expr) => {
3951 $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3952 check_added_monitors!($node, 0);
3953 assert!($node.node.get_and_clear_pending_msg_events().is_empty());
3954 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
3958 macro_rules! mid_update_fulfill_dance {
3959 ($node: expr, $prev_node: expr, $new_msgs: expr) => {
3961 $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3962 check_added_monitors!($node, 1);
3963 let new_next_msgs = if $new_msgs {
3964 get_next_msgs!($node)
3966 assert!($node.node.get_and_clear_pending_msg_events().is_empty());
3969 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
3970 next_msgs = new_next_msgs;
3975 let mut prev_node = expected_route.last().unwrap();
3976 for (idx, node) in expected_route.iter().rev().enumerate() {
3977 assert_eq!(expected_next_node, node.node.get_our_node_id());
3978 let update_next_msgs = !skip_last || idx != expected_route.len() - 1;
3979 if next_msgs.is_some() {
3980 mid_update_fulfill_dance!(node, prev_node, update_next_msgs);
3981 } else if update_next_msgs {
3982 next_msgs = get_next_msgs!(node);
3984 assert!(node.node.get_and_clear_pending_msg_events().is_empty());
3986 if !skip_last && idx == expected_route.len() - 1 {
3987 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
3994 last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
3995 let events = origin_node.node.get_and_clear_pending_events();
3996 assert_eq!(events.len(), 1);
3998 Event::PaymentSent { payment_preimage } => {
3999 assert_eq!(payment_preimage, our_payment_preimage);
4001 _ => panic!("Unexpected event"),
4006 fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) {
4007 claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage);
4010 const TEST_FINAL_CLTV: u32 = 32;
4012 fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
4013 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
4014 assert_eq!(route.hops.len(), expected_route.len());
4015 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
4016 assert_eq!(hop.pubkey, node.node.get_our_node_id());
4019 send_along_route(origin_node, route, expected_route, recv_value)
4022 fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
4023 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
4024 assert_eq!(route.hops.len(), expected_route.len());
4025 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
4026 assert_eq!(hop.pubkey, node.node.get_our_node_id());
4029 let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
4031 let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
4033 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
4034 _ => panic!("Unexpected error variant"),
4038 fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
4039 let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
4040 claim_payment(&origin, expected_route, our_payment_preimage);
4043 fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) {
4044 assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, PaymentFailReason::PreimageUnknown));
4045 check_added_monitors!(expected_route.last().unwrap(), 1);
4047 let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
4048 macro_rules! update_fail_dance {
4049 ($node: expr, $prev_node: expr, $last_node: expr) => {
4051 $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
4052 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
4057 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
4058 let mut prev_node = expected_route.last().unwrap();
4059 for (idx, node) in expected_route.iter().rev().enumerate() {
4060 assert_eq!(expected_next_node, node.node.get_our_node_id());
4061 if next_msgs.is_some() {
4062 // We may be the "last node" for the purpose of the commitment dance if we're
4063 // skipping the last node (implying it is disconnected) and we're the
4064 // second-to-last node!
4065 update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
4068 let events = node.node.get_and_clear_pending_msg_events();
4069 if !skip_last || idx != expected_route.len() - 1 {
4070 assert_eq!(events.len(), 1);
4072 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4073 assert!(update_add_htlcs.is_empty());
4074 assert!(update_fulfill_htlcs.is_empty());
4075 assert_eq!(update_fail_htlcs.len(), 1);
4076 assert!(update_fail_malformed_htlcs.is_empty());
4077 assert!(update_fee.is_none());
4078 expected_next_node = node_id.clone();
4079 next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
4081 _ => panic!("Unexpected event"),
4084 assert!(events.is_empty());
4086 if !skip_last && idx == expected_route.len() - 1 {
4087 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
4094 update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
4096 let events = origin_node.node.get_and_clear_pending_events();
4097 assert_eq!(events.len(), 1);
4099 Event::PaymentFailed { payment_hash, rejected_by_dest } => {
4100 assert_eq!(payment_hash, our_payment_hash);
4101 assert!(rejected_by_dest);
4103 _ => panic!("Unexpected event"),
4108 fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) {
4109 fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
4112 fn create_network(node_count: usize) -> Vec<Node> {
4113 let mut nodes = Vec::new();
4114 let mut rng = thread_rng();
4115 let secp_ctx = Secp256k1::new();
4116 let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
4118 let chan_count = Rc::new(RefCell::new(0));
4119 let payment_count = Rc::new(RefCell::new(0));
4121 for _ in 0..node_count {
4122 let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
4123 let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
4124 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
4125 let mut seed = [0; 32];
4126 rng.fill_bytes(&mut seed);
4127 let keys_manager = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger)));
4128 let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone()));
4129 let mut config = UserConfig::new();
4130 config.channel_options.announced_channel = true;
4131 config.channel_limits.force_announced_channel_preference = false;
4132 let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap();
4133 let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
4134 nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, node_seed: seed,
4135 network_payment_count: payment_count.clone(),
4136 network_chan_count: chan_count.clone(),
4144 fn test_async_inbound_update_fee() {
4145 let mut nodes = create_network(2);
4146 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4147 let channel_id = chan.2;
4150 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4154 // send (1) commitment_signed -.
4155 // <- update_add_htlc/commitment_signed
4156 // send (2) RAA (awaiting remote revoke) -.
4157 // (1) commitment_signed is delivered ->
4158 // .- send (3) RAA (awaiting remote revoke)
4159 // (2) RAA is delivered ->
4160 // .- send (4) commitment_signed
4161 // <- (3) RAA is delivered
4162 // send (5) commitment_signed -.
4163 // <- (4) commitment_signed is delivered
4165 // (5) commitment_signed is delivered ->
4167 // (6) RAA is delivered ->
4169 // First nodes[0] generates an update_fee
4170 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
4171 check_added_monitors!(nodes[0], 1);
4173 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4174 assert_eq!(events_0.len(), 1);
4175 let (update_msg, commitment_signed) = match events_0[0] { // (1)
4176 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
4177 (update_fee.as_ref(), commitment_signed)
4179 _ => panic!("Unexpected event"),
4182 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4184 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
4185 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4186 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
4187 check_added_monitors!(nodes[1], 1);
4189 let payment_event = {
4190 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
4191 assert_eq!(events_1.len(), 1);
4192 SendEvent::from_event(events_1.remove(0))
4194 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
4195 assert_eq!(payment_event.msgs.len(), 1);
4197 // ...now when the messages get delivered everyone should be happy
4198 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4199 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
4200 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4201 // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
4202 check_added_monitors!(nodes[0], 1);
4204 // deliver(1), generate (3):
4205 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4206 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4207 // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
4208 check_added_monitors!(nodes[1], 1);
4210 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); // deliver (2)
4211 let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4212 assert!(bs_update.update_add_htlcs.is_empty()); // (4)
4213 assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
4214 assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
4215 assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
4216 assert!(bs_update.update_fee.is_none()); // (4)
4217 check_added_monitors!(nodes[1], 1);
4219 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); // deliver (3)
4220 let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4221 assert!(as_update.update_add_htlcs.is_empty()); // (5)
4222 assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
4223 assert!(as_update.update_fail_htlcs.is_empty()); // (5)
4224 assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
4225 assert!(as_update.update_fee.is_none()); // (5)
4226 check_added_monitors!(nodes[0], 1);
4228 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap(); // deliver (4)
4229 let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4230 // only (6) so get_event_msg's assert(len == 1) passes
4231 check_added_monitors!(nodes[0], 1);
4233 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed).unwrap(); // deliver (5)
4234 let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4235 check_added_monitors!(nodes[1], 1);
4237 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
4238 check_added_monitors!(nodes[0], 1);
4240 let events_2 = nodes[0].node.get_and_clear_pending_events();
4241 assert_eq!(events_2.len(), 1);
4243 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
4244 _ => panic!("Unexpected event"),
4247 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); // deliver (6)
4248 check_added_monitors!(nodes[1], 1);
4252 fn test_update_fee_unordered_raa() {
4253 // Just the intro to the previous test followed by an out-of-order RAA (which caused a
4254 // crash in an earlier version of the update_fee patch)
4255 let mut nodes = create_network(2);
4256 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4257 let channel_id = chan.2;
4260 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4262 // First nodes[0] generates an update_fee
4263 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
4264 check_added_monitors!(nodes[0], 1);
4266 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4267 assert_eq!(events_0.len(), 1);
4268 let update_msg = match events_0[0] { // (1)
4269 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
4272 _ => panic!("Unexpected event"),
4275 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4277 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
4278 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4279 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
4280 check_added_monitors!(nodes[1], 1);
4282 let payment_event = {
4283 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
4284 assert_eq!(events_1.len(), 1);
4285 SendEvent::from_event(events_1.remove(0))
4287 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
4288 assert_eq!(payment_event.msgs.len(), 1);
4290 // ...now when the messages get delivered everyone should be happy
4291 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4292 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
4293 let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4294 // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
4295 check_added_monitors!(nodes[0], 1);
4297 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2)
4298 check_added_monitors!(nodes[1], 1);
4300 // We can't continue, sadly, because our (1) now has a bogus signature
4304 fn test_multi_flight_update_fee() {
4305 let nodes = create_network(2);
4306 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4307 let channel_id = chan.2;
4310 // update_fee/commitment_signed ->
4311 // .- send (1) RAA and (2) commitment_signed
4312 // update_fee (never committed) ->
4313 // (3) update_fee ->
4314 // We have to manually generate the above update_fee; the protocol allows it, but since we
4315 // don't track which updates correspond to which revoke_and_ack responses, we're still in
4316 // AwaitingRAA mode and will not generate the update_fee ourselves yet.
4317 // <- (1) RAA delivered
4318 // (3) is generated and send (4) CS -.
4319 // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
4320 // know the per_commitment_point to use for it.
4321 // <- (2) commitment_signed delivered
4322 // revoke_and_ack ->
4323 // B should send no response here
4324 // (4) commitment_signed delivered ->
4325 // <- RAA/commitment_signed delivered
4326 // revoke_and_ack ->
4328 // First nodes[0] generates an update_fee
4329 let initial_feerate = get_feerate!(nodes[0], channel_id);
4330 nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
4331 check_added_monitors!(nodes[0], 1);
4333 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4334 assert_eq!(events_0.len(), 1);
4335 let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
4336 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
4337 (update_fee.as_ref().unwrap(), commitment_signed)
4339 _ => panic!("Unexpected event"),
4342 // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
4343 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
4344 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
4345 let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4346 check_added_monitors!(nodes[1], 1);
4348 // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
4350 nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
4351 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4352 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4354 // Manually create the (3) update_fee message that nodes[0] will eventually generate itself...
4355 let mut update_msg_2 = msgs::UpdateFee {
4356 channel_id: update_msg_1.channel_id.clone(),
4357 feerate_per_kw: (initial_feerate + 30) as u32,
4360 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
4362 update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
4364 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
4366 // Deliver (1), generating (3) and (4)
4367 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
4368 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4369 check_added_monitors!(nodes[0], 1);
4370 assert!(as_second_update.update_add_htlcs.is_empty());
4371 assert!(as_second_update.update_fulfill_htlcs.is_empty());
4372 assert!(as_second_update.update_fail_htlcs.is_empty());
4373 assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
4374 // Check that the newly-generated update_fee matches what we delivered:
4375 assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
4376 assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
4378 // Deliver (2) commitment_signed
4379 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
4380 let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4381 check_added_monitors!(nodes[0], 1);
4382 // No commitment_signed so get_event_msg's assert(len == 1) passes
4384 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap();
4385 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4386 check_added_monitors!(nodes[1], 1);
4389 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed).unwrap();
4390 let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4391 check_added_monitors!(nodes[1], 1);
4393 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
4394 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4395 check_added_monitors!(nodes[0], 1);
4397 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment).unwrap();
4398 let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4399 // No commitment_signed so get_event_msg's assert(len == 1) passes
4400 check_added_monitors!(nodes[0], 1);
4402 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap();
4403 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4404 check_added_monitors!(nodes[1], 1);
4408 fn test_update_fee_vanilla() {
4409 let nodes = create_network(2);
4410 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4411 let channel_id = chan.2;
4413 let feerate = get_feerate!(nodes[0], channel_id);
4414 nodes[0].node.update_fee(channel_id, feerate+25).unwrap();
4415 check_added_monitors!(nodes[0], 1);
4417 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4418 assert_eq!(events_0.len(), 1);
4419 let (update_msg, commitment_signed) = match events_0[0] {
4420 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4421 (update_fee.as_ref(), commitment_signed)
4423 _ => panic!("Unexpected event"),
4425 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4427 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4428 let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4429 check_added_monitors!(nodes[1], 1);
4431 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4432 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4433 check_added_monitors!(nodes[0], 1);
4435 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
4436 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4437 // No commitment_signed so get_event_msg's assert(len == 1) passes
4438 check_added_monitors!(nodes[0], 1);
4440 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
4441 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4442 check_added_monitors!(nodes[1], 1);
4446 fn test_update_fee_that_funder_cannot_afford() {
4447 let mut nodes = create_network(2);
4448 let channel_value = 1888;
4449 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000);
4450 let channel_id = chan.2;
4453 nodes[0].node.update_fee(channel_id, feerate).unwrap();
4454 check_added_monitors!(nodes[0], 1);
4455 let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4457 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()).unwrap();
4459 commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
4461 //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above.
4462 //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve)
4464 let chan_lock = nodes[1].node.channel_state.lock().unwrap();
4465 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4467 //We made sure neither party's funds are below the dust limit, so subtract the 2 non-HTLC outputs from the output count to get the HTLC count
4468 let num_htlcs = chan.last_local_commitment_txn[0].output.len() - 2;
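// Expected fee = feerate * commitment-tx weight / 1000, where the weight is the fixed base weight
// plus a per-HTLC increment for each HTLC output.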
4469 let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
4470 let mut actual_fee = chan.last_local_commitment_txn[0].output.iter().fold(0, |acc, output| acc + output.value);
4471 actual_fee = channel_value - actual_fee;
4472 assert_eq!(total_fee, actual_fee);
4475 //Add 2 to the previous fee rate so the final fee increases by only 1 (with no HTLCs the fee is essentially
4476 //fee_rate*(724/1000), so the 2-unit bump adds ~1.45 sat and integer division rounds that down to a 1 sat increase)
4477 nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
4478 check_added_monitors!(nodes[0], 1);
4480 let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4482 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap());
4484 //While producing the commitment_signed response after handling the received update_fee request, the
4485 //check that the funder (who sent the update_fee request) can afford the new fee (funder_balance >= fee + channel_reserve)
4486 //should produce an error.
4487 let err = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed).unwrap_err();
4489 assert!(match err.err {
4490 "Funding remote cannot afford proposed new fee" => true,
4494 //clear the message we could not handle
4495 nodes[1].node.get_and_clear_pending_msg_events();
4499 fn test_update_fee_with_fundee_update_add_htlc() {
4500 let mut nodes = create_network(2);
4501 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4502 let channel_id = chan.2;
4505 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4507 let feerate = get_feerate!(nodes[0], channel_id);
4508 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
4509 check_added_monitors!(nodes[0], 1);
4511 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4512 assert_eq!(events_0.len(), 1);
4513 let (update_msg, commitment_signed) = match events_0[0] {
4514 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4515 (update_fee.as_ref(), commitment_signed)
4517 _ => panic!("Unexpected event"),
4519 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4520 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4521 let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4522 check_added_monitors!(nodes[1], 1);
4524 let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap();
4526 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]);
4528 // nothing happens since nodes[1] is in AwaitingRemoteRevoke
4529 nodes[1].node.send_payment(route, our_payment_hash).unwrap();
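// The new HTLC is only queued on nodes[1] (it is still awaiting the remote revoke), so nothing
// is delivered to nodes[0] yet, as the checks below confirm.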
4531 let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
4532 assert_eq!(added_monitors.len(), 0);
4533 added_monitors.clear();
4535 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4536 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4537 // node[1] has nothing to do
4539 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4540 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4541 check_added_monitors!(nodes[0], 1);
4543 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
4544 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4545 // No commitment_signed so get_event_msg's assert(len == 1) passes
4546 check_added_monitors!(nodes[0], 1);
4547 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
4548 check_added_monitors!(nodes[1], 1);
4549 // AwaitingRemoteRevoke ends here
4551 let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4552 assert_eq!(commitment_update.update_add_htlcs.len(), 1);
4553 assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
4554 assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
4555 assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
4556 assert!(commitment_update.update_fee.is_none());
4558 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap();
4559 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
4560 check_added_monitors!(nodes[0], 1);
4561 let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4563 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap();
4564 check_added_monitors!(nodes[1], 1);
4565 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4567 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap();
4568 check_added_monitors!(nodes[1], 1);
4569 let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4570 // No commitment_signed so get_event_msg's assert(len == 1) passes
4572 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap();
4573 check_added_monitors!(nodes[0], 1);
4574 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4576 let events = nodes[0].node.get_and_clear_pending_events();
4577 assert_eq!(events.len(), 1);
4579 Event::PendingHTLCsForwardable { .. } => { },
4580 _ => panic!("Unexpected event"),
4582 nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
4583 nodes[0].node.process_pending_htlc_forwards();
4585 let events = nodes[0].node.get_and_clear_pending_events();
4586 assert_eq!(events.len(), 1);
4588 Event::PaymentReceived { .. } => { },
4589 _ => panic!("Unexpected event"),
4592 claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
4594 send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
4595 send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
4596 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
4600 fn test_update_fee() {
4601 let nodes = create_network(2);
4602 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4603 let channel_id = chan.2;
4606 // (1) update_fee/commitment_signed ->
4607 // <- (2) revoke_and_ack
4608 // .- send (3) commitment_signed
4609 // (4) update_fee/commitment_signed ->
4610 // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
4611 // <- (3) commitment_signed delivered
4612 // send (6) revoke_and_ack -.
4613 // <- (5) deliver revoke_and_ack
4614 // (6) deliver revoke_and_ack ->
4615 // .- send (7) commitment_signed in response to (4)
4616 // <- (7) deliver commitment_signed
4617 // revoke_and_ack ->
4619 // Create and deliver (1)...
4620 let feerate = get_feerate!(nodes[0], channel_id);
4621 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
4622 check_added_monitors!(nodes[0], 1);
4624 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4625 assert_eq!(events_0.len(), 1);
4626 let (update_msg, commitment_signed) = match events_0[0] {
4627 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4628 (update_fee.as_ref(), commitment_signed)
4630 _ => panic!("Unexpected event"),
4632 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4634 // Generate (2) and (3):
4635 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4636 let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4637 check_added_monitors!(nodes[1], 1);
4640 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4641 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4642 check_added_monitors!(nodes[0], 1);
4644 // Create and deliver (4)...
4645 nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
4646 check_added_monitors!(nodes[0], 1);
4647 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4648 assert_eq!(events_0.len(), 1);
4649 let (update_msg, commitment_signed) = match events_0[0] {
4650 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4651 (update_fee.as_ref(), commitment_signed)
4653 _ => panic!("Unexpected event"),
4656 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4657 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4658 check_added_monitors!(nodes[1], 1);
4660 let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4661 // No commitment_signed so get_event_msg's assert(len == 1) passes
4663 // Handle (3), creating (6):
4664 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
4665 check_added_monitors!(nodes[0], 1);
4666 let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4667 // No commitment_signed so get_event_msg's assert(len == 1) passes
4670 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4671 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4672 check_added_monitors!(nodes[0], 1);
4674 // Deliver (6), creating (7):
4675 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
4676 let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4677 assert!(commitment_update.update_add_htlcs.is_empty());
4678 assert!(commitment_update.update_fulfill_htlcs.is_empty());
4679 assert!(commitment_update.update_fail_htlcs.is_empty());
4680 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
4681 assert!(commitment_update.update_fee.is_none());
4682 check_added_monitors!(nodes[1], 1);
4685 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
4686 check_added_monitors!(nodes[0], 1);
4687 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4688 // No commitment_signed so get_event_msg's assert(len == 1) passes
4690 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
4691 check_added_monitors!(nodes[1], 1);
4692 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4694 assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
4695 assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
4696 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
4700 fn pre_funding_lock_shutdown_test() {
4701 // Test sending a shutdown prior to funding_locked after funding generation
4702 let nodes = create_network(2);
4703 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0);
4704 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
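// Mine the funding tx in a single block on both nodes' chain watchers; with only one confirmation
// neither side sends funding_locked yet, which is exactly the state this test wants.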
4705 nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
4706 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
4708 nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap();
4709 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4710 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4711 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4712 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4714 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4715 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4716 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4717 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4718 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4719 assert!(node_0_none.is_none());
4721 assert!(nodes[0].node.list_channels().is_empty());
4722 assert!(nodes[1].node.list_channels().is_empty());
4726 fn updates_shutdown_wait() {
4727 // Test sending a shutdown with outstanding updates pending
4728 let mut nodes = create_network(3);
4729 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4730 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4731 let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
4732 let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
4734 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
4736 nodes[0].node.close_channel(&chan_1.2).unwrap();
4737 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4738 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4739 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4740 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4742 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4743 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4745 let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
4746 if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {}
4747 else { panic!("New sends should fail!") };
4748 if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {}
4749 else { panic!("New sends should fail!") };
4751 assert!(nodes[2].node.claim_funds(our_payment_preimage));
4752 check_added_monitors!(nodes[2], 1);
4753 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4754 assert!(updates.update_add_htlcs.is_empty());
4755 assert!(updates.update_fail_htlcs.is_empty());
4756 assert!(updates.update_fail_malformed_htlcs.is_empty());
4757 assert!(updates.update_fee.is_none());
4758 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4759 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
4760 check_added_monitors!(nodes[1], 1);
4761 let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4762 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
4764 assert!(updates_2.update_add_htlcs.is_empty());
4765 assert!(updates_2.update_fail_htlcs.is_empty());
4766 assert!(updates_2.update_fail_malformed_htlcs.is_empty());
4767 assert!(updates_2.update_fee.is_none());
4768 assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
4769 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
4770 commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
4772 let events = nodes[0].node.get_and_clear_pending_events();
4773 assert_eq!(events.len(), 1);
4775 Event::PaymentSent { ref payment_preimage } => {
4776 assert_eq!(our_payment_preimage, *payment_preimage);
4778 _ => panic!("Unexpected event"),
4781 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4782 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4783 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4784 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4785 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4786 assert!(node_0_none.is_none());
4788 assert!(nodes[0].node.list_channels().is_empty());
4790 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
4791 nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
4792 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
4793 assert!(nodes[1].node.list_channels().is_empty());
4794 assert!(nodes[2].node.list_channels().is_empty());
4798 fn htlc_fail_async_shutdown() {
4799 // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
4800 let mut nodes = create_network(3);
4801 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4802 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4804 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
4805 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4806 nodes[0].node.send_payment(route, our_payment_hash).unwrap();
4807 check_added_monitors!(nodes[0], 1);
4808 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4809 assert_eq!(updates.update_add_htlcs.len(), 1);
4810 assert!(updates.update_fulfill_htlcs.is_empty());
4811 assert!(updates.update_fail_htlcs.is_empty());
4812 assert!(updates.update_fail_malformed_htlcs.is_empty());
4813 assert!(updates.update_fee.is_none());
4815 nodes[1].node.close_channel(&chan_1.2).unwrap();
4816 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4817 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4818 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4820 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
4821 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
4822 check_added_monitors!(nodes[1], 1);
4823 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4824 commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
4826 let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4827 assert!(updates_2.update_add_htlcs.is_empty());
4828 assert!(updates_2.update_fulfill_htlcs.is_empty());
4829 assert_eq!(updates_2.update_fail_htlcs.len(), 1);
4830 assert!(updates_2.update_fail_malformed_htlcs.is_empty());
4831 assert!(updates_2.update_fee.is_none());
4833 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap();
4834 commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
4836 let events = nodes[0].node.get_and_clear_pending_events();
4837 assert_eq!(events.len(), 1);
4839 Event::PaymentFailed { ref payment_hash, ref rejected_by_dest } => {
4840 assert_eq!(our_payment_hash, *payment_hash);
4841 assert!(!rejected_by_dest);
4843 _ => panic!("Unexpected event"),
4846 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4847 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4848 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4849 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4850 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4851 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4852 assert!(node_0_none.is_none());
4854 assert!(nodes[0].node.list_channels().is_empty());
4856 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
4857 nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
4858 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
4859 assert!(nodes[1].node.list_channels().is_empty());
4860 assert!(nodes[2].node.list_channels().is_empty());
4864 fn update_fee_async_shutdown() {
4865 // Test update_fee works after shutdown start if messages are delivered out-of-order
4866 let nodes = create_network(2);
4867 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4869 let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate();
4870 nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap();
4871 check_added_monitors!(nodes[0], 1);
4872 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4873 assert!(updates.update_add_htlcs.is_empty());
4874 assert!(updates.update_fulfill_htlcs.is_empty());
4875 assert!(updates.update_fail_htlcs.is_empty());
4876 assert!(updates.update_fail_malformed_htlcs.is_empty());
4877 assert!(updates.update_fee.is_some());
4879 nodes[1].node.close_channel(&chan_1.2).unwrap();
4880 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4881 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4882 // Note that we don't actually test normative behavior here. The spec indicates we could
4883 // actually send a closing_signed here, but it is somewhat unclear and could possibly be amended
4884 // to require waiting on the full commitment dance before doing so (see
4885 // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid
4886 // ambiguity, we should wait until after the full commitment dance to send closing_signed.
4887 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4889 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap();
4890 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
4891 check_added_monitors!(nodes[1], 1);
4892 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4893 let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);
4895 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4896 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() {
4897 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
4898 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
4901 _ => panic!("Unexpected event"),
4903 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4904 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4905 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4906 assert!(node_0_none.is_none());
4909 fn do_test_shutdown_rebroadcast(recv_count: u8) {
4910 // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
4911 // messages delivered prior to disconnect
4912 let nodes = create_network(3);
4913 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4914 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4916 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
4918 nodes[1].node.close_channel(&chan_1.2).unwrap();
4919 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4921 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4922 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4924 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4928 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4929 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4931 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
4932 let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
4933 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
4934 let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
4936 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap();
4937 let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4938 assert!(node_1_shutdown == node_1_2nd_shutdown);
4940 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap();
4941 let node_0_2nd_shutdown = if recv_count > 0 {
4942 let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4943 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
4946 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4947 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
4948 get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
4950 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap();
4952 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4953 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4955 assert!(nodes[2].node.claim_funds(our_payment_preimage));
4956 check_added_monitors!(nodes[2], 1);
4957 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4958 assert!(updates.update_add_htlcs.is_empty());
4959 assert!(updates.update_fail_htlcs.is_empty());
4960 assert!(updates.update_fail_malformed_htlcs.is_empty());
4961 assert!(updates.update_fee.is_none());
4962 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4963 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
4964 check_added_monitors!(nodes[1], 1);
4965 let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4966 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
4968 assert!(updates_2.update_add_htlcs.is_empty());
4969 assert!(updates_2.update_fail_htlcs.is_empty());
4970 assert!(updates_2.update_fail_malformed_htlcs.is_empty());
4971 assert!(updates_2.update_fee.is_none());
4972 assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
4973 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
4974 commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
4976 let events = nodes[0].node.get_and_clear_pending_events();
4977 assert_eq!(events.len(), 1);
4979 Event::PaymentSent { ref payment_preimage } => {
4980 assert_eq!(our_payment_preimage, *payment_preimage);
4982 _ => panic!("Unexpected event"),
4985 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4987 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4988 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4989 assert!(node_1_closing_signed.is_some());
4992 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4993 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4995 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
4996 let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
4997 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
4998 if recv_count == 0 {
4999 // If all closing_signeds weren't delivered we can just resume where we left off...
5000 let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
5002 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
5003 let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
5004 assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
5006 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
5007 let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
5008 assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
5010 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
5011 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
5013 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
5014 let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
5015 assert!(node_0_closing_signed == node_0_2nd_closing_signed);
5017 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
5018 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
5019 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
5020 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
5021 assert!(node_0_none.is_none());
5023 // If one node, however, received and responded with an identical closing_signed we end
5024 // up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
5025 // There isn't really anything better we can simply do, but in the future we might
5026 // explore storing a set of recently-closed channels that got disconnected during
5027 // closing_signed and avoiding broadcasting local commitment txn for some timeout to
5028 // give our counterparty enough time to (potentially) broadcast a cooperative closing transaction.
5030 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
5032 if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) =
5033 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
5034 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
5035 let msgs::ErrorMessage {ref channel_id, ..} = msg;
5036 assert_eq!(*channel_id, chan_1.2);
5037 } else { panic!("Needed SendErrorMessage close"); }
5039 // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
5040 // checks it, but in this case nodes[0] didn't ever get a chance to receive a
5041 // closing_signed so we do it ourselves
5042 let events = nodes[0].node.get_and_clear_pending_msg_events();
5043 assert_eq!(events.len(), 1);
5045 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
5046 assert_eq!(msg.contents.flags & 2, 2);
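// bit 1 (value 2) of the channel_update flags is the "disabled" bit, so this checks that the
// broadcast update marks the channel as disabled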
5048 _ => panic!("Unexpected event"),
5052 assert!(nodes[0].node.list_channels().is_empty());
5054 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
5055 nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
5056 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
5057 assert!(nodes[1].node.list_channels().is_empty());
5058 assert!(nodes[2].node.list_channels().is_empty());
5062 fn test_shutdown_rebroadcast() {
5063 do_test_shutdown_rebroadcast(0);
5064 do_test_shutdown_rebroadcast(1);
5065 do_test_shutdown_rebroadcast(2);
5069 fn fake_network_test() {
5070 // Simple test which builds a network of ChannelManagers, connects them to each other, and
5071 // tests that payments get routed and transactions broadcast in semi-reasonable ways.
5072 let nodes = create_network(4);
5074 // Create some initial channels
5075 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5076 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5077 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5079 // Rebalance the network a bit by relaying one payment through all the channels...
5080 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5081 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5082 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5083 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5085 // Send some more payments
5086 send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
5087 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
5088 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
5090 // Test failure packets
5091 let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
5092 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
5094 // Add a new channel between nodes[1] and nodes[3], skipping nodes[2]
5095 let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
5097 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
5098 send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
5099 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5100 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5101 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5102 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5103 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5105 // Do some rebalance loop payments, simultaneously
5106 let mut hops = Vec::with_capacity(3);
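// Hand-build a circular route nodes[1] -> nodes[2] -> nodes[3] -> nodes[1]; the intermediate
// hops' fees are filled in afterwards, working backwards from the final amount.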
5107 hops.push(RouteHop {
5108 pubkey: nodes[2].node.get_our_node_id(),
5109 short_channel_id: chan_2.0.contents.short_channel_id,
5111 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
5113 hops.push(RouteHop {
5114 pubkey: nodes[3].node.get_our_node_id(),
5115 short_channel_id: chan_3.0.contents.short_channel_id,
5117 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
5119 hops.push(RouteHop {
5120 pubkey: nodes[1].node.get_our_node_id(),
5121 short_channel_id: chan_4.0.contents.short_channel_id,
5123 cltv_expiry_delta: TEST_FINAL_CLTV,
5125 hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
5126 hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
5127 let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
5129 let mut hops = Vec::with_capacity(3);
5130 hops.push(RouteHop {
5131 pubkey: nodes[3].node.get_our_node_id(),
5132 short_channel_id: chan_4.0.contents.short_channel_id,
5134 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
5136 hops.push(RouteHop {
5137 pubkey: nodes[2].node.get_our_node_id(),
5138 short_channel_id: chan_3.0.contents.short_channel_id,
5140 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
5142 hops.push(RouteHop {
5143 pubkey: nodes[1].node.get_our_node_id(),
5144 short_channel_id: chan_2.0.contents.short_channel_id,
5146 cltv_expiry_delta: TEST_FINAL_CLTV,
5148 hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
5149 hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
5150 let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
5152 // Claim the rebalances...
5153 fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
5154 claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
5156 // Add a second, duplicate channel between nodes[1] and nodes[3]
5157 let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
5159 // Send some payments across both channels
5160 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
5161 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
5162 let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
5164 route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
5166 //TODO: Test that routes work again here as we've been notified that the channel is full
5168 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
5169 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
5170 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
5172 // Close down the channels...
5173 close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
5174 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
5175 close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
5176 close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
5177 close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
5181 fn duplicate_htlc_test() {
5182 // Test that we accept duplicate payment_hash HTLCs across the network and that
5183 // claiming/failing them are all separate and don't affect each other
5184 let mut nodes = create_network(6);
5186 // Create some initial channels to route via 3 to 4/5 from 0/1/2
5187 create_announced_chan_between_nodes(&nodes, 0, 3);
5188 create_announced_chan_between_nodes(&nodes, 1, 3);
5189 create_announced_chan_between_nodes(&nodes, 2, 3);
5190 create_announced_chan_between_nodes(&nodes, 3, 4);
5191 create_announced_chan_between_nodes(&nodes, 3, 5);
5193 let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
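// Rewinding the shared payment counter makes the next route_payment reuse the exact same
// preimage/hash, giving us duplicate-HTLC payments (done twice below).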
5195 *nodes[0].network_payment_count.borrow_mut() -= 1;
5196 assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
5198 *nodes[0].network_payment_count.borrow_mut() -= 1;
5199 assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
5201 claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
5202 fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
5203 claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
5206 #[derive(PartialEq)]
5207 enum HTLCType { NONE, TIMEOUT, SUCCESS }
5208 /// Tests that the given node has broadcast transactions for the given Channel
5210 /// First checks that the latest local commitment tx has been broadcast, unless an explicit
5211 /// commitment_tx is provided, which may be used to test that a remote commitment tx was
5212 /// broadcast and the revoked outputs were claimed.
5214 /// Next tests that there is (or is not) a transaction that spends the commitment transaction
5215 /// that appears to be the type of HTLC transaction specified in has_htlc_tx.
5217 /// All broadcast transactions must be accounted for by one of the above three types or this check fails.
5219 fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
5220 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
5221 assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
5223 let mut res = Vec::with_capacity(2);
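// retain() below removes the matching transactions from the pending-broadcast list (stashing them
// in res); anything still left over at the end is unexpected and trips the final assert.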
5224 node_txn.retain(|tx| {
5225 if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
5226 check_spends!(tx, chan.3.clone());
5227 if commitment_tx.is_none() {
5228 res.push(tx.clone());
5233 if let Some(explicit_tx) = commitment_tx {
5234 res.push(explicit_tx.clone());
5237 assert_eq!(res.len(), 1);
5239 if has_htlc_tx != HTLCType::NONE {
5240 node_txn.retain(|tx| {
5241 if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
5242 check_spends!(tx, res[0].clone());
5243 if has_htlc_tx == HTLCType::TIMEOUT {
5244 assert!(tx.lock_time != 0);
5246 assert!(tx.lock_time == 0);
5248 res.push(tx.clone());
5252 assert_eq!(res.len(), 2);
5255 assert!(node_txn.is_empty());
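// A minimal usage sketch of the helper above, mirroring its calls later in this file: after a
// force-close in which our own commitment tx hits the chain with an outbound HTLC still pending,
// a test typically does
//     let node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
// which asserts that the commitment tx and an HTLC-Timeout spending it were broadcast, and
// returns both so they can be mined in the test's simulated chain.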
5259 /// Tests that the given node has broadcast a claim transaction against the provided revoked
5260 /// HTLC transaction.
5261 fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
5262 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
5263 assert_eq!(node_txn.len(), 1);
5264 node_txn.retain(|tx| {
5265 if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
5266 check_spends!(tx, revoked_tx.clone());
5270 assert!(node_txn.is_empty());
5273 fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
5274 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
5276 assert!(node_txn.len() >= 1);
5277 assert_eq!(node_txn[0].input.len(), 1);
5278 let mut found_prev = false;
5280 for tx in prev_txn {
5281 if node_txn[0].input[0].previous_output.txid == tx.txid() {
5282 check_spends!(node_txn[0], tx.clone());
5283 assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
5284 assert_eq!(tx.input.len(), 1); // must spend a commitment tx
5290 assert!(found_prev);
5292 let mut res = Vec::new();
5293 mem::swap(&mut *node_txn, &mut res);
5297 fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
5298 let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
5299 assert_eq!(events_1.len(), 1);
5300 let as_update = match events_1[0] {
5301 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
5304 _ => panic!("Unexpected event"),
5307 let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
5308 assert_eq!(events_2.len(), 1);
5309 let bs_update = match events_2[0] {
5310 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
5313 _ => panic!("Unexpected event"),
5317 node.router.handle_channel_update(&as_update).unwrap();
5318 node.router.handle_channel_update(&bs_update).unwrap();
5322 macro_rules! expect_pending_htlcs_forwardable {
5324 let events = $node.node.get_and_clear_pending_events();
5325 assert_eq!(events.len(), 1);
5327 Event::PendingHTLCsForwardable { .. } => { },
5328 _ => panic!("Unexpected event"),
5330 $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
5331 $node.node.process_pending_htlc_forwards();
5336 fn channel_reserve_test() {
5338 use std::sync::atomic::Ordering;
5339 use ln::msgs::HandleError;
5341 macro_rules! get_channel_value_stat {
5342 ($node: expr, $channel_id: expr) => {{
5343 let chan_lock = $node.node.channel_state.lock().unwrap();
5344 let chan = chan_lock.by_id.get(&$channel_id).unwrap();
5345 chan.get_value_stat()
5349 let mut nodes = create_network(3);
5350 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
5351 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
5353 let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
5354 let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
5356 let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
5357 let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
5359 macro_rules! get_route_and_payment_hash {
5360 ($recv_value: expr) => {{
5361 let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
5362 let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
5363 (route, payment_hash, payment_preimage)
5367 macro_rules! expect_forward {
5369 let mut events = $node.node.get_and_clear_pending_msg_events();
5370 assert_eq!(events.len(), 1);
5371 check_added_monitors!($node, 1);
5372 let payment_event = SendEvent::from_event(events.remove(0));
5377 macro_rules! expect_payment_received {
5378 ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
5379 let events = $node.node.get_and_clear_pending_events();
5380 assert_eq!(events.len(), 1);
5382 Event::PaymentReceived { ref payment_hash, amt } => {
5383 assert_eq!($expected_payment_hash, *payment_hash);
5384 assert_eq!($expected_recv_value, amt);
5386 _ => panic!("Unexpected event"),
5391 let feemsat = 239; // the per-hop relay fee charged in this test setup (a known magic value here)
5392 let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
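// For reference, the standard BOLT 7 per-hop relay fee is
//     fee_msat = fee_base_msat + amt_msat * fee_proportional_millionths / 1_000_000
// and the hard-coded 239 above is presumably what this evaluates to for the single intermediate
// hop under the test harness's default channel config (the field names here describe the BOLT
// formula, not harness APIs).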
5394 let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
5396 // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
5398 let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
5399 assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
5400 let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
5402 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
5403 _ => panic!("Unknown error variants"),
5407 let mut htlc_id = 0;
5408 // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
5409 // nodes[0]'s wealth
5411 let amt_msat = recv_value_0 + total_fee_msat;
5412 if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
5415 send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
5418 let (stat01_, stat11_, stat12_, stat22_) = (
5419 get_channel_value_stat!(nodes[0], chan_1.2),
5420 get_channel_value_stat!(nodes[1], chan_1.2),
5421 get_channel_value_stat!(nodes[1], chan_2.2),
5422 get_channel_value_stat!(nodes[2], chan_2.2),
5425 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
5426 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
5427 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
5428 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
5429 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
5433 let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
5434 // attempt to get channel_reserve violation
5435 let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
5436 let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
5438 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
5439 _ => panic!("Unknown error variants"),
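// Arithmetic note on the failure above: recv_value was chosen as value_to_self_msat -
// channel_reserve_msat - total_fee_msat, so sending recv_value + 1 (plus the fee) would leave
// nodes[0] one msat below its required channel reserve, which is why send_payment refuses it.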
5443 // adding pending output
5444 let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
5445 let amt_msat_1 = recv_value_1 + total_fee_msat;
5447 let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
5448 let payment_event_1 = {
5449 nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
5450 check_added_monitors!(nodes[0], 1);
5452 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
5453 assert_eq!(events.len(), 1);
5454 SendEvent::from_event(events.remove(0))
5456 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
5458 // channel reserve test with htlc pending output > 0
5459 let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
5461 let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
5462 match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
5463 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
5464 _ => panic!("Unknown error variants"),
5469 // now test the channel_reserve check on nodes[1]'s side
5470 let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
5472 // Need to manually create update_add_htlc message to go around the channel reserve check in send_htlc()
5473 let secp_ctx = Secp256k1::new();
5474 let session_priv = SecretKey::from_slice(&secp_ctx, &{
5475 let mut session_key = [0; 32];
5476 rng::fill_bytes(&mut session_key);
5478 }).expect("RNG is bad!");
5480 let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
5481 let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
5482 let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
5483 let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
5484 let msg = msgs::UpdateAddHTLC {
5485 channel_id: chan_1.2,
5487 amount_msat: htlc_msat,
5488 payment_hash: our_payment_hash,
5489 cltv_expiry: htlc_cltv,
5490 onion_routing_packet: onion_packet,
5493 let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
5495 HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
5499 // split the rest to test holding cell
5500 let recv_value_21 = recv_value_2/2;
5501 let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
5503 let stat = get_channel_value_stat!(nodes[0], chan_1.2);
5504 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
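// Arithmetic check on the split: recv_value_21 + recv_value_22 + total_fee_msat == recv_value_2,
// and the already-pending htlc_1 accounts for recv_value_1 + total_fee_msat, so once both halves
// (each plus its fee) are committed nodes[0] will sit exactly at its channel reserve, as the
// assertion directly above verifies.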
5507 // now see if they go through on both sides
5508 let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
5509 // but this one will get stuck in the holding cell
5510 nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
5511 check_added_monitors!(nodes[0], 0);
5512 let events = nodes[0].node.get_and_clear_pending_events();
5513 assert_eq!(events.len(), 0);
5515 // test with outbound holding cell amount > 0
5517 let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
5518 match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
5519 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
5520 _ => panic!("Unknown error variants"),
5524 let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
5525 // this will also get stuck in the holding cell
5526 nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
5527 check_added_monitors!(nodes[0], 0);
5528 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
5529 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5531 // flush the pending htlc
5532 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
5533 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5534 check_added_monitors!(nodes[1], 1);
5536 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
5537 check_added_monitors!(nodes[0], 1);
5538 let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
5540 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed).unwrap();
5541 let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
5542 // No commitment_signed so get_event_msg's assert(len == 1) passes
5543 check_added_monitors!(nodes[0], 1);
5545 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
5546 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
5547 check_added_monitors!(nodes[1], 1);
5549 expect_pending_htlcs_forwardable!(nodes[1]);
5551 let ref payment_event_11 = expect_forward!(nodes[1]);
5552 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
5553 commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
5555 expect_pending_htlcs_forwardable!(nodes[2]);
5556 expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
5558 // flush the htlcs in the holding cell
5559 assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
5560 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
5561 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
5562 commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
5563 expect_pending_htlcs_forwardable!(nodes[1]);
5565 let ref payment_event_3 = expect_forward!(nodes[1]);
5566 assert_eq!(payment_event_3.msgs.len(), 2);
5567 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
5568 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
5570 commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
5571 expect_pending_htlcs_forwardable!(nodes[2]);
5573 let events = nodes[2].node.get_and_clear_pending_events();
5574 assert_eq!(events.len(), 2);
5576 Event::PaymentReceived { ref payment_hash, amt } => {
5577 assert_eq!(our_payment_hash_21, *payment_hash);
5578 assert_eq!(recv_value_21, amt);
5580 _ => panic!("Unexpected event"),
5583 Event::PaymentReceived { ref payment_hash, amt } => {
5584 assert_eq!(our_payment_hash_22, *payment_hash);
5585 assert_eq!(recv_value_22, amt);
5587 _ => panic!("Unexpected event"),
5590 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
5591 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
5592 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
5594 let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
5595 let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
5596 assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
5597 assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
5599 let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
5600 assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
5604 fn channel_monitor_network_test() {
5605 // Simple test which builds a network of ChannelManagers, connects them to each other, and
5606 // tests that ChannelMonitor is able to recover from various states.
5607 let nodes = create_network(5);
5609 // Create some initial channels
5610 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5611 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5612 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5613 let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
5615 // Rebalance the network a bit by relaying one payment through all the channels...
5616 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
5617 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
5618 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
5619 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
5621 // Simple case with no pending HTLCs:
5622 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
5624 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
5625 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5626 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
5627 test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
5629 get_announce_close_broadcast_events(&nodes, 0, 1);
5630 assert_eq!(nodes[0].node.list_channels().len(), 0);
5631 assert_eq!(nodes[1].node.list_channels().len(), 1);
5633 // One pending HTLC is discarded by the force-close:
5634 let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
5636 // Simple case of one pending HTLC to HTLC-Timeout
5637 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
5639 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
5640 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5641 nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
5642 test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
5644 get_announce_close_broadcast_events(&nodes, 1, 2);
5645 assert_eq!(nodes[1].node.list_channels().len(), 0);
5646 assert_eq!(nodes[2].node.list_channels().len(), 1);
5648 macro_rules! claim_funds {
5649 ($node: expr, $prev_node: expr, $preimage: expr) => {
5651 assert!($node.node.claim_funds($preimage));
5652 check_added_monitors!($node, 1);
5654 let events = $node.node.get_and_clear_pending_msg_events();
5655 assert_eq!(events.len(), 1);
5657 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
5658 assert!(update_add_htlcs.is_empty());
5659 assert!(update_fail_htlcs.is_empty());
5660 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
5662 _ => panic!("Unexpected event"),
5668 // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
5669 // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
5670 nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
5672 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
5674 // Claim the payment on nodes[3], giving it knowledge of the preimage
5675 claim_funds!(nodes[3], nodes[2], payment_preimage_1);
5677 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5678 nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
5680 check_preimage_claim(&nodes[3], &node_txn);
5682 get_announce_close_broadcast_events(&nodes, 2, 3);
5683 assert_eq!(nodes[2].node.list_channels().len(), 0);
5684 assert_eq!(nodes[3].node.list_channels().len(), 1);
5686 { // Cheat and reset nodes[4]'s height to 1
5687 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5688 nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
5691 assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
5692 assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
5693 // One pending HTLC to time out:
5694 let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
5695 // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment as a buffer)
5699 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5700 nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
5701 for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
5702 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5703 nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
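// Height bookkeeping: blocks get connected at heights 2 through TEST_FINAL_CLTV + 2 +
// HTLC_FAIL_TIMEOUT_BLOCKS, i.e. past the HTLC's CLTV expiry plus the fail-timeout buffer, which
// should make nodes[3]'s ChannelMonitor broadcast its commitment and HTLC-Timeout transactions,
// checked just below.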
5706 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
5708 // Claim the payment on nodes[4], giving it knowledge of the preimage
5709 claim_funds!(nodes[4], nodes[3], payment_preimage_2);
5711 header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5712 nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
5713 for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
5714 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5715 nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
5718 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
5720 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5721 nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
5723 check_preimage_claim(&nodes[4], &node_txn);
5725 get_announce_close_broadcast_events(&nodes, 3, 4);
5726 assert_eq!(nodes[3].node.list_channels().len(), 0);
5727 assert_eq!(nodes[4].node.list_channels().len(), 0);
5731 fn test_justice_tx() {
5732 // Test justice txn built on revoked HTLC-Success tx, against both sides
5734 let nodes = create_network(2);
5735 // Create some new channels:
5736 let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
5738 // A pending HTLC which will be revoked:
5739 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5740 // Get the will-be-revoked local txn from nodes[0]
5741 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
5742 assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
5743 assert_eq!(revoked_local_txn[0].input.len(), 1);
5744 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
5745 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
5746 assert_eq!(revoked_local_txn[1].input.len(), 1);
5747 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
5748 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
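// (The 133-byte witness script is the fingerprint of an offered-HTLC output; the same length-based
// identification is used later in this file, where 77 marks a revoked to_local script, 133 an
// offered HTLC and 138 a received HTLC.)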
5749 // Revoke the old state
5750 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
5753 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5754 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5756 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5757 assert_eq!(node_txn.len(), 3);
5758 assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
5759 assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
5761 check_spends!(node_txn[0], revoked_local_txn[0].clone());
5762 node_txn.swap_remove(0);
5764 test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
5766 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5767 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
5768 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5769 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
5770 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
5772 get_announce_close_broadcast_events(&nodes, 0, 1);
5774 assert_eq!(nodes[0].node.list_channels().len(), 0);
5775 assert_eq!(nodes[1].node.list_channels().len(), 0);
5777 // We test the justice tx built by A on B's revoked HTLC-Success tx
5778 // Create some new channels:
5779 let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
5781 // A pending HTLC which will be revoked:
5782 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5783 // Get the will-be-revoked local txn from B
5784 let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
5785 assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
5786 assert_eq!(revoked_local_txn[0].input.len(), 1);
5787 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
5788 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
5789 // Revoke the old state
5790 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
5792 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5793 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5795 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5796 assert_eq!(node_txn.len(), 3);
5797 assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
5798 assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
5800 check_spends!(node_txn[0], revoked_local_txn[0].clone());
5801 node_txn.swap_remove(0);
5803 test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
5805 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5806 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
5807 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5808 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
5809 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone());
5811 get_announce_close_broadcast_events(&nodes, 0, 1);
5812 assert_eq!(nodes[0].node.list_channels().len(), 0);
5813 assert_eq!(nodes[1].node.list_channels().len(), 0);
5817 fn revoked_output_claim() {
5818 // Simple test to ensure a node will claim a revoked output when a stale remote commitment
5819 // transaction is broadcast by its counterparty
5820 let nodes = create_network(2);
5821 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5822 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim the revoked output
5823 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
5824 assert_eq!(revoked_local_txn.len(), 1);
5825 // Only output is the full channel value back to nodes[0]:
5826 assert_eq!(revoked_local_txn[0].output.len(), 1);
5827 // Send a payment through, updating everyone's latest commitment txn
5828 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
5830 // Inform nodes[1] that nodes[0] broadcast a stale tx
5831 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5832 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5833 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5834 assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast justice tx twice, and its own local state once
5836 assert_eq!(node_txn[0], node_txn[2]);
5838 check_spends!(node_txn[0], revoked_local_txn[0].clone());
5839 check_spends!(node_txn[1], chan_1.3.clone());
5841 // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
5842 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5843 get_announce_close_broadcast_events(&nodes, 0, 1);
5847 fn claim_htlc_outputs_shared_tx() {
5848 // A node revoked its old state; the HTLCs haven't timed out yet, so they're claimed in a single shared justice tx
5849 let nodes = create_network(2);
5851 // Create some new channel:
5852 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5854 // Rebalance the network to generate an HTLC in each direction
5855 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5856 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
5857 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5858 let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
5860 // Get the will-be-revoked local txn from node[0]
5861 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
5862 assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
5863 assert_eq!(revoked_local_txn[0].input.len(), 1);
5864 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5865 assert_eq!(revoked_local_txn[1].input.len(), 1);
5866 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
5867 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
5868 check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
5870 //Revoke the old state
5871 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
5874 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5876 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5878 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
5879 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5880 assert_eq!(node_txn.len(), 4);
5882 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
5883 check_spends!(node_txn[0], revoked_local_txn[0].clone());
5885 assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
5887 let mut witness_lens = BTreeSet::new();
5888 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
5889 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
5890 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
5891 assert_eq!(witness_lens.len(), 3);
5892 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
5893 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
5894 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
5896 // Next nodes[1] broadcasts its current local tx state:
5897 assert_eq!(node_txn[1].input.len(), 1);
5898 assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spends the funding tx's unique output; this tx is broadcast by the ChannelManager
5900 assert_eq!(node_txn[2].input.len(), 1);
5901 let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
5902 assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
5903 assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
5904 assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
5905 assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
5907 get_announce_close_broadcast_events(&nodes, 0, 1);
5908 assert_eq!(nodes[0].node.list_channels().len(), 0);
5909 assert_eq!(nodes[1].node.list_channels().len(), 0);
5913 fn claim_htlc_outputs_single_tx() {
5914 // A node revoked its old state; the HTLCs have timed out, so each is claimed in a separate justice tx
5915 let nodes = create_network(2);
5917 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5919 // Rebalance the network to generate an HTLC in each direction
5920 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5921 // nodes[0] is going to revoke an old state, so nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
5922 // time as two separate claim transactions, since we time the HTLCs out by connecting the block at a high current height
5923 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5924 let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
5926 // Get the will-be-revoked local txn from node[0]
5927 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
5929 //Revoke the old state
5930 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
5933 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5935 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
5937 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
5938 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5939 assert_eq!(node_txn.len(), 12); // ChannelManager: 2, ChannelMonitor: 10 = (1 standard revoked output + 2 revoked-HTLC claim txs + 1 local commitment tx + 1 htlc timeout tx) * 2 (block re-scan)
5941 assert_eq!(node_txn[0], node_txn[7]);
5942 assert_eq!(node_txn[1], node_txn[8]);
5943 assert_eq!(node_txn[2], node_txn[9]);
5944 assert_eq!(node_txn[3], node_txn[10]);
5945 assert_eq!(node_txn[4], node_txn[11]);
5946 assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + htlc timeout tx broadcast by the ChannelManager
5947 assert_eq!(node_txn[4], node_txn[6]);
5949 assert_eq!(node_txn[0].input.len(), 1);
5950 assert_eq!(node_txn[1].input.len(), 1);
5951 assert_eq!(node_txn[2].input.len(), 1);
5953 let mut revoked_tx_map = HashMap::new();
5954 revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
5955 node_txn[0].verify(&revoked_tx_map).unwrap();
5956 node_txn[1].verify(&revoked_tx_map).unwrap();
5957 node_txn[2].verify(&revoked_tx_map).unwrap();
5959 let mut witness_lens = BTreeSet::new();
5960 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
5961 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
5962 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
5963 assert_eq!(witness_lens.len(), 3);
5964 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
5965 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
5966 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
5968 assert_eq!(node_txn[3].input.len(), 1);
5969 check_spends!(node_txn[3], chan_1.3.clone());
5971 assert_eq!(node_txn[4].input.len(), 1);
5972 let witness_script = node_txn[4].input[0].witness.last().unwrap();
5973 assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
5974 assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
5975 assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
5976 assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
5978 get_announce_close_broadcast_events(&nodes, 0, 1);
5979 assert_eq!(nodes[0].node.list_channels().len(), 0);
5980 assert_eq!(nodes[1].node.list_channels().len(), 0);
5984 fn test_htlc_ignore_latest_remote_commitment() {
5985 // Test that HTLC transactions spending the latest remote commitment transaction are simply
5986 // ignored if we cannot claim them. This originally tickled an invalid unwrap().
5987 let nodes = create_network(2);
5988 create_announced_chan_between_nodes(&nodes, 0, 1);
5990 route_payment(&nodes[0], &[&nodes[1]], 10000000);
5991 nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
5993 let events = nodes[0].node.get_and_clear_pending_msg_events();
5994 assert_eq!(events.len(), 1);
5996 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
5997 assert_eq!(flags & 0b10, 0b10);
5999 _ => panic!("Unexpected event"),
6003 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
6004 assert_eq!(node_txn.len(), 2);
6006 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6007 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
6010 let events = nodes[1].node.get_and_clear_pending_msg_events();
6011 assert_eq!(events.len(), 1);
6013 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
6014 assert_eq!(flags & 0b10, 0b10);
6016 _ => panic!("Unexpected event"),
6020 // Duplicate the block_connected call since this may happen due to other listeners
6021 // registering new transactions
6022 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
6026 fn test_force_close_fail_back() {
6027 // Check which HTLCs are failed-backwards on channel force-closure
6028 let mut nodes = create_network(3);
6029 create_announced_chan_between_nodes(&nodes, 0, 1);
6030 create_announced_chan_between_nodes(&nodes, 1, 2);
6032 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
6034 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
6036 let mut payment_event = {
6037 nodes[0].node.send_payment(route, our_payment_hash).unwrap();
6038 check_added_monitors!(nodes[0], 1);
6040 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6041 assert_eq!(events.len(), 1);
6042 SendEvent::from_event(events.remove(0))
6045 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
6046 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6048 let events_1 = nodes[1].node.get_and_clear_pending_events();
6049 assert_eq!(events_1.len(), 1);
6051 Event::PendingHTLCsForwardable { .. } => { },
6052 _ => panic!("Unexpected event"),
6055 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
6056 nodes[1].node.process_pending_htlc_forwards();
6058 let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6059 assert_eq!(events_2.len(), 1);
6060 payment_event = SendEvent::from_event(events_2.remove(0));
6061 assert_eq!(payment_event.msgs.len(), 1);
6063 check_added_monitors!(nodes[1], 1);
6064 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
6065 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
6066 check_added_monitors!(nodes[2], 1);
6067 let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6069 // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
6070 // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
6071 // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
6073 nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
6074 let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
6075 assert_eq!(events_3.len(), 1);
6077 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
6078 assert_eq!(flags & 0b10, 0b10);
6080 _ => panic!("Unexpected event"),
6084 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
6085 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
6086 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will go
6087 // back to nodes[1] upon timeout.
6088 assert_eq!(node_txn.len(), 1);
6092 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6093 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
6095 let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
6096 // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
6097 assert_eq!(events_4.len(), 1);
6099 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
6100 assert_eq!(flags & 0b10, 0b10);
6102 _ => panic!("Unexpected event"),
6105 // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
6107 let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
6108 monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
6109 .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
6111 nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
6112 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
6113 assert_eq!(node_txn.len(), 1);
6114 assert_eq!(node_txn[0].input.len(), 1);
6115 assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
6116 assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
6117 assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
6119 check_spends!(node_txn[0], tx);
6123 fn test_unconf_chan() {
6124 // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0]'s side
6125 let nodes = create_network(2);
6126 create_announced_chan_between_nodes(&nodes, 0, 1);
6128 let channel_state = nodes[0].node.channel_state.lock().unwrap();
6129 assert_eq!(channel_state.by_id.len(), 1);
6130 assert_eq!(channel_state.short_to_id.len(), 1);
6131 mem::drop(channel_state);
6133 let mut headers = Vec::new();
6134 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6135 headers.push(header.clone());
6137 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6138 headers.push(header.clone());
6140 while !headers.is_empty() {
6141 nodes[0].node.block_disconnected(&headers.pop().unwrap());
6144 let events = nodes[0].node.get_and_clear_pending_msg_events();
6145 assert_eq!(events.len(), 1);
6147 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
6148 assert_eq!(flags & 0b10, 0b10);
6150 _ => panic!("Unexpected event"),
6153 let channel_state = nodes[0].node.channel_state.lock().unwrap();
6154 assert_eq!(channel_state.by_id.len(), 0);
6155 assert_eq!(channel_state.short_to_id.len(), 0);
6158 macro_rules! get_chan_reestablish_msgs {
6159 ($src_node: expr, $dst_node: expr) => {
6161 let mut res = Vec::with_capacity(1);
6162 for msg in $src_node.node.get_and_clear_pending_msg_events() {
6163 if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
6164 assert_eq!(*node_id, $dst_node.node.get_our_node_id());
6165 res.push(msg.clone());
6167 panic!("Unexpected event")
6175 macro_rules! handle_chan_reestablish_msgs {
6176 ($src_node: expr, $dst_node: expr) => {
6178 let msg_events = $src_node.node.get_and_clear_pending_msg_events();
6180 let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
6182 assert_eq!(*node_id, $dst_node.node.get_our_node_id());
6188 let mut revoke_and_ack = None;
6189 let mut commitment_update = None;
6190 let order = if let Some(ev) = msg_events.get(idx) {
6193 &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
6194 assert_eq!(*node_id, $dst_node.node.get_our_node_id());
6195 revoke_and_ack = Some(msg.clone());
6196 RAACommitmentOrder::RevokeAndACKFirst
6198 &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
6199 assert_eq!(*node_id, $dst_node.node.get_our_node_id());
6200 commitment_update = Some(updates.clone());
6201 RAACommitmentOrder::CommitmentFirst
6203 _ => panic!("Unexpected event"),
6206 RAACommitmentOrder::CommitmentFirst
6209 if let Some(ev) = msg_events.get(idx) {
6211 &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
6212 assert_eq!(*node_id, $dst_node.node.get_our_node_id());
6213 assert!(revoke_and_ack.is_none());
6214 revoke_and_ack = Some(msg.clone());
6216 &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
6217 assert_eq!(*node_id, $dst_node.node.get_our_node_id());
6218 assert!(commitment_update.is_none());
6219 commitment_update = Some(updates.clone());
6221 _ => panic!("Unexpected event"),
6225 (funding_locked, revoke_and_ack, commitment_update, order)
6230 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
6231 /// for claims/fails they are separated out.
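/// For example, the call used in the reconnect tests below,
/// `reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false))`,
/// expects funding_locked to be re-exchanged and nodes[0] to re-send exactly one update_add_htlc
/// (with its commitment_signed) to nodes[1], with no claims, fails, or RAAs outstanding either way.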
6232 fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
6233 node_a.node.peer_connected(&node_b.node.get_our_node_id());
6234 let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
6235 node_b.node.peer_connected(&node_a.node.get_our_node_id());
6236 let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
6238 let mut resp_1 = Vec::new();
6239 for msg in reestablish_1 {
6240 node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap();
6241 resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
6243 if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
6244 check_added_monitors!(node_b, 1);
6246 check_added_monitors!(node_b, 0);
6249 let mut resp_2 = Vec::new();
6250 for msg in reestablish_2 {
6251 node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap();
6252 resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
6254 if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
6255 check_added_monitors!(node_a, 1);
6257 check_added_monitors!(node_a, 0);
6260 // We don't yet support both sides needing updates, as that would require a different commitment dance:
6261 assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
6262 (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
6264 for chan_msgs in resp_1.drain(..) {
6266 node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
6267 let announcement_event = node_a.node.get_and_clear_pending_msg_events();
6268 if !announcement_event.is_empty() {
6269 assert_eq!(announcement_event.len(), 1);
6270 if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
6271 //TODO: Test announcement_sigs re-sending
6272 } else { panic!("Unexpected event!"); }
6275 assert!(chan_msgs.0.is_none());
6278 assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
6279 node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
6280 assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
6281 check_added_monitors!(node_a, 1);
6283 assert!(chan_msgs.1.is_none());
6285 if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
6286 let commitment_update = chan_msgs.2.unwrap();
6287 if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
6288 assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
6290 assert!(commitment_update.update_add_htlcs.is_empty());
6292 assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
6293 assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
6294 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
6295 for update_add in commitment_update.update_add_htlcs {
6296 node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
6298 for update_fulfill in commitment_update.update_fulfill_htlcs {
6299 node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
6301 for update_fail in commitment_update.update_fail_htlcs {
6302 node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
6305 if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
6306 commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
6308 node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
6309 check_added_monitors!(node_a, 1);
6310 let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id());
6311 // No commitment_signed so get_event_msg's assert(len == 1) passes
6312 node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
6313 assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
6314 check_added_monitors!(node_b, 1);
6317 assert!(chan_msgs.2.is_none());
6321 for chan_msgs in resp_2.drain(..) {
6323 node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
6324 let announcement_event = node_b.node.get_and_clear_pending_msg_events();
6325 if !announcement_event.is_empty() {
6326 assert_eq!(announcement_event.len(), 1);
6327 if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
6328 //TODO: Test announcement_sigs re-sending
6329 } else { panic!("Unexpected event!"); }
6332 assert!(chan_msgs.0.is_none());
6335 assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
6336 node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
6337 assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
6338 check_added_monitors!(node_b, 1);
6340 assert!(chan_msgs.1.is_none());
6342 if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
6343 let commitment_update = chan_msgs.2.unwrap();
6344 if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
6345 assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
6347 assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
6348 assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
6349 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
6350 for update_add in commitment_update.update_add_htlcs {
6351 node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
6353 for update_fulfill in commitment_update.update_fulfill_htlcs {
6354 node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
6356 for update_fail in commitment_update.update_fail_htlcs {
6357 node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
6360 if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
6361 commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
6363 node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
6364 check_added_monitors!(node_b, 1);
6365 let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id());
6366 // No commitment_signed so get_event_msg's assert(len == 1) passes
6367 node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6368 assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
6369 check_added_monitors!(node_a, 1);
6372 assert!(chan_msgs.2.is_none());
6378 fn test_simple_peer_disconnect() {
6379 // Test that we can reconnect when there are no lost messages
6380 let nodes = create_network(3);
6381 create_announced_chan_between_nodes(&nodes, 0, 1);
6382 create_announced_chan_between_nodes(&nodes, 1, 2);
6384 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6385 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6386 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6388 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
6389 let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
6390 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
6391 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
6393 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6394 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6395 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6397 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
6398 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
6399 let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
6400 let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
6402 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6403 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6405 claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
6406 fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
6408 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
6410 let events = nodes[0].node.get_and_clear_pending_events();
6411 assert_eq!(events.len(), 2);
6413 Event::PaymentSent { payment_preimage } => {
6414 assert_eq!(payment_preimage, payment_preimage_3);
6416 _ => panic!("Unexpected event"),
6419 Event::PaymentFailed { payment_hash, rejected_by_dest } => {
6420 assert_eq!(payment_hash, payment_hash_5);
6421 assert!(rejected_by_dest);
6423 _ => panic!("Unexpected event"),
6427 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
6428 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
6431 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
6432 // Test that we can reconnect when in-flight HTLC updates get dropped
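// Summarizing the branches below, messages_delivered steps the add/commit/RAA handshake one
// message further per value: 0 = funding_locked never delivered, 1 = the update_add_htlc dropped,
// 2 = update_add_htlc delivered but not its commitment_signed, 3 = commitment_signed delivered,
// 4 = nodes[1]'s revoke_and_ack delivered, 5 = nodes[1]'s commitment_signed delivered,
// 6 = the final revoke_and_ack delivered too.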
6433 let mut nodes = create_network(2);
6434 if messages_delivered == 0 {
6435 create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
6436 // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
6438 create_announced_chan_between_nodes(&nodes, 0, 1);
6441 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6442 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
6444 let payment_event = {
6445 nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
6446 check_added_monitors!(nodes[0], 1);
6448 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6449 assert_eq!(events.len(), 1);
6450 SendEvent::from_event(events.remove(0))
6452 assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
6454 if messages_delivered < 2 {
6455 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
6457 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
6458 if messages_delivered >= 3 {
6459 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
6460 check_added_monitors!(nodes[1], 1);
6461 let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6463 if messages_delivered >= 4 {
6464 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6465 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6466 check_added_monitors!(nodes[0], 1);
6468 if messages_delivered >= 5 {
6469 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
6470 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
6471 // No commitment_signed so get_event_msg's assert(len == 1) passes
6472 check_added_monitors!(nodes[0], 1);
6474 if messages_delivered >= 6 {
6475 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
6476 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
6477 check_added_monitors!(nodes[1], 1);
6484 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6485 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6486 if messages_delivered < 3 {
6487 // Even if the funding_locked messages get exchanged, as long as nothing further was
6488 // received on either side, both sides will need to resend them.
6489 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
6490 } else if messages_delivered == 3 {
6491 // nodes[0] still wants its RAA + commitment_signed
6492 reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
6493 } else if messages_delivered == 4 {
6494 // nodes[0] still wants its commitment_signed
6495 reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
6496 } else if messages_delivered == 5 {
6497 // nodes[1] still wants its final RAA
6498 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
6499 } else if messages_delivered == 6 {
6500 // Everything was delivered...
6501 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6504 let events_1 = nodes[1].node.get_and_clear_pending_events();
6505 assert_eq!(events_1.len(), 1);
6507 Event::PendingHTLCsForwardable { .. } => { },
6508 _ => panic!("Unexpected event"),
6511 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6512 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6513 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6515 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
6516 nodes[1].node.process_pending_htlc_forwards();
6518 let events_2 = nodes[1].node.get_and_clear_pending_events();
6519 assert_eq!(events_2.len(), 1);
6521 Event::PaymentReceived { ref payment_hash, amt } => {
6522 assert_eq!(payment_hash_1, *payment_hash);
6523 assert_eq!(amt, 1000000);
6525 _ => panic!("Unexpected event"),
6528 nodes[1].node.claim_funds(payment_preimage_1);
6529 check_added_monitors!(nodes[1], 1);
6531 let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
6532 assert_eq!(events_3.len(), 1);
6533 let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
6534 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
6535 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6536 assert!(updates.update_add_htlcs.is_empty());
6537 assert!(updates.update_fail_htlcs.is_empty());
6538 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
6539 assert!(updates.update_fail_malformed_htlcs.is_empty());
6540 assert!(updates.update_fee.is_none());
6541 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
6543 _ => panic!("Unexpected event"),
6546 if messages_delivered >= 1 {
6547 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
6549 let events_4 = nodes[0].node.get_and_clear_pending_events();
6550 assert_eq!(events_4.len(), 1);
6552 Event::PaymentSent { ref payment_preimage } => {
6553 assert_eq!(payment_preimage_1, *payment_preimage);
6555 _ => panic!("Unexpected event"),
6558 if messages_delivered >= 2 {
6559 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
6560 check_added_monitors!(nodes[0], 1);
6561 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6563 if messages_delivered >= 3 {
6564 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
6565 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
6566 check_added_monitors!(nodes[1], 1);
6568 if messages_delivered >= 4 {
6569 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap();
6570 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6571 // No commitment_signed so get_event_msg's assert(len == 1) passes
6572 check_added_monitors!(nodes[1], 1);
6574 if messages_delivered >= 5 {
6575 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6576 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6577 check_added_monitors!(nodes[0], 1);
6584 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6585 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6586 if messages_delivered < 2 {
6587 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
6588 //TODO: Deduplicate PaymentSent events, then enable this if:
6589 //if messages_delivered < 1 {
6590 let events_4 = nodes[0].node.get_and_clear_pending_events();
6591 assert_eq!(events_4.len(), 1);
6593 Event::PaymentSent { ref payment_preimage } => {
6594 assert_eq!(payment_preimage_1, *payment_preimage);
6596 _ => panic!("Unexpected event"),
6599 } else if messages_delivered == 2 {
6600 // nodes[0] still wants its RAA + commitment_signed
6601 reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
6602 } else if messages_delivered == 3 {
6603 // nodes[0] still wants its commitment_signed
6604 reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
6605 } else if messages_delivered == 4 {
6606 // nodes[1] still wants its final RAA
6607 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
6608 } else if messages_delivered == 5 {
6609 // Everything was delivered...
6610 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6613 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6614 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6615 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6617 // Channel should still work fine...
6618 let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
6619 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
6623 fn test_drop_messages_peer_disconnect_a() {
6624 do_test_drop_messages_peer_disconnect(0);
6625 do_test_drop_messages_peer_disconnect(1);
6626 do_test_drop_messages_peer_disconnect(2);
6627 do_test_drop_messages_peer_disconnect(3);
6631 fn test_drop_messages_peer_disconnect_b() {
6632 do_test_drop_messages_peer_disconnect(4);
6633 do_test_drop_messages_peer_disconnect(5);
6634 do_test_drop_messages_peer_disconnect(6);
6638 fn test_funding_peer_disconnect() {
6639 // Test that we can lock in our funding tx while disconnected
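// The funding transaction is confirmed on both nodes' chain monitors while the peers are
// disconnected, so each side queues a SendFundingLocked it cannot deliver. Reconnecting with
// reconnect_nodes(..., true, ...) re-exchanges funding_locked and the channel becomes usable,
// though not announced (hence the manual list_usable_channels in get_route below).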
6640 let nodes = create_network(2);
6641 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
6643 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6644 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6646 confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
6647 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
6648 assert_eq!(events_1.len(), 1);
6650 MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
6651 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
6653 _ => panic!("Unexpected event"),
6656 confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
6657 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6658 assert_eq!(events_2.len(), 1);
6660 MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
6661 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6663 _ => panic!("Unexpected event"),
6666 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6667 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6668 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6669 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6671 // TODO: We shouldn't need to manually pass list_usable_channels here once we support
6672 // rebroadcasting announcement_signatures upon reconnect.
6674 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6675 let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
6676 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
6680 fn test_drop_messages_peer_disconnect_dual_htlc() {
6681 // Test that we can handle reconnecting when both sides of a channel have pending
6682 // commitment_updates when we disconnect.
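// At the disconnect, nodes[0] has an undelivered update_add_htlc + commitment_signed (the second
// payment) and is also holding the revoke_and_ack it generated for nodes[1]'s fulfill of the
// first payment, while nodes[1] has nothing to retransmit. The channel_reestablish handling
// below therefore has nodes[0] resend both (commitment update first), after which the two
// crossed commitment dances are driven to completion.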
6683 let mut nodes = create_network(2);
6684 create_announced_chan_between_nodes(&nodes, 0, 1);
6686 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6688 // Now start sending a second payment, which will still be in flight when we disconnect
6689 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6690 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
6692 nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
6693 check_added_monitors!(nodes[0], 1);
6695 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
6696 assert_eq!(events_1.len(), 1);
6698 MessageSendEvent::UpdateHTLCs { .. } => {},
6699 _ => panic!("Unexpected event"),
6702 assert!(nodes[1].node.claim_funds(payment_preimage_1));
6703 check_added_monitors!(nodes[1], 1);
6705 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6706 assert_eq!(events_2.len(), 1);
6708 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6709 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6710 assert!(update_add_htlcs.is_empty());
6711 assert_eq!(update_fulfill_htlcs.len(), 1);
6712 assert!(update_fail_htlcs.is_empty());
6713 assert!(update_fail_malformed_htlcs.is_empty());
6714 assert!(update_fee.is_none());
6716 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
6717 let events_3 = nodes[0].node.get_and_clear_pending_events();
6718 assert_eq!(events_3.len(), 1);
6720 Event::PaymentSent { ref payment_preimage } => {
6721 assert_eq!(*payment_preimage, payment_preimage_1);
6723 _ => panic!("Unexpected event"),
6726 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
6727 let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
6728 // No commitment_signed so get_event_msg's assert(len == 1) passes
6729 check_added_monitors!(nodes[0], 1);
6731 _ => panic!("Unexpected event"),
6734 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6735 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6737 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
6738 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6739 assert_eq!(reestablish_1.len(), 1);
6740 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
6741 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6742 assert_eq!(reestablish_2.len(), 1);
6744 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
6745 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6746 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
6747 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6749 assert!(as_resp.0.is_none());
6750 assert!(bs_resp.0.is_none());
6752 assert!(bs_resp.1.is_none());
6753 assert!(bs_resp.2.is_none());
6755 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
6757 assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
6758 assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
6759 assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
6760 assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
6761 assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
6762 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
6763 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
6764 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6765 // No commitment_signed so get_event_msg's assert(len == 1) passes
6766 check_added_monitors!(nodes[1], 1);
6768 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap();
6769 let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6770 assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
6771 assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
6772 assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
6773 assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
6774 assert!(bs_second_commitment_signed.update_fee.is_none());
6775 check_added_monitors!(nodes[1], 1);
6777 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6778 let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6779 assert!(as_commitment_signed.update_add_htlcs.is_empty());
6780 assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
6781 assert!(as_commitment_signed.update_fail_htlcs.is_empty());
6782 assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
6783 assert!(as_commitment_signed.update_fee.is_none());
6784 check_added_monitors!(nodes[0], 1);
6786 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
6787 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
6788 // No commitment_signed so get_event_msg's assert(len == 1) passes
6789 check_added_monitors!(nodes[0], 1);
6791 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
6792 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6793 // No commitment_signed so get_event_msg's assert(len == 1) passes
6794 check_added_monitors!(nodes[1], 1);
6796 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
6797 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
6798 check_added_monitors!(nodes[1], 1);
6800 let events_4 = nodes[1].node.get_and_clear_pending_events();
6801 assert_eq!(events_4.len(), 1);
6803 Event::PendingHTLCsForwardable { .. } => { },
6804 _ => panic!("Unexpected event"),
6807 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
6808 nodes[1].node.process_pending_htlc_forwards();
6810 let events_5 = nodes[1].node.get_and_clear_pending_events();
6811 assert_eq!(events_5.len(), 1);
6813 Event::PaymentReceived { ref payment_hash, amt: _ } => {
6814 assert_eq!(payment_hash_2, *payment_hash);
6816 _ => panic!("Unexpected event"),
6819 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
6820 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6821 check_added_monitors!(nodes[0], 1);
6823 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
6827 fn test_simple_monitor_permanent_update_fail() {
6828 // Test that we handle a simple permanent monitor update failure
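// A PermanentFailure from the ChannelMonitor update means we can no longer safely advance the
// channel state, so the send fails with MonitorUpdateFailed and the channel is force-closed:
// the only message event below is a BroadcastChannelUpdate and the channel is gone from
// list_channels().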
6829 let mut nodes = create_network(2);
6830 create_announced_chan_between_nodes(&nodes, 0, 1);
6832 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6833 let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
6835 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
6836 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
6837 check_added_monitors!(nodes[0], 1);
6839 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
6840 assert_eq!(events_1.len(), 1);
6842 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
6843 _ => panic!("Unexpected event"),
6846 // TODO: Once we hit the chain with the failure transaction we should check that we get a
6847 // PaymentFailed event
6849 assert_eq!(nodes[0].node.list_channels().len(), 0);
6852 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
6853 // Test that we can recover from a simple temporary monitor update failure optionally with
6854 // a disconnect in between
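// A TemporaryFailure leaves the channel intact but frozen: the send returns MonitorUpdateFailed
// with no events or messages generated, and the held update_add_htlc is only released once the
// monitor update succeeds again and test_restore_channel_monitor() is called below.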
6855 let mut nodes = create_network(2);
6856 create_announced_chan_between_nodes(&nodes, 0, 1);
6858 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6859 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
6861 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
6862 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); }
6863 check_added_monitors!(nodes[0], 1);
6865 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
6866 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6867 assert_eq!(nodes[0].node.list_channels().len(), 1);
6870 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6871 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6872 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6875 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
6876 nodes[0].node.test_restore_channel_monitor();
6877 check_added_monitors!(nodes[0], 1);
6879 let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
6880 assert_eq!(events_2.len(), 1);
6881 let payment_event = SendEvent::from_event(events_2.pop().unwrap());
6882 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
6883 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
6884 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6886 expect_pending_htlcs_forwardable!(nodes[1]);
6888 let events_3 = nodes[1].node.get_and_clear_pending_events();
6889 assert_eq!(events_3.len(), 1);
6891 Event::PaymentReceived { ref payment_hash, amt } => {
6892 assert_eq!(payment_hash_1, *payment_hash);
6893 assert_eq!(amt, 1000000);
6895 _ => panic!("Unexpected event"),
6898 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
6900 // Now set it to failed again...
6901 let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
6902 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
6903 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
6904 check_added_monitors!(nodes[0], 1);
6906 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
6907 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6908 assert_eq!(nodes[0].node.list_channels().len(), 1);
6911 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6912 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6913 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6916 // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
6917 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
6918 nodes[0].node.test_restore_channel_monitor();
6919 check_added_monitors!(nodes[0], 1);
6921 let events_5 = nodes[0].node.get_and_clear_pending_msg_events();
6922 assert_eq!(events_5.len(), 1);
6924 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
6925 _ => panic!("Unexpected event"),
6928 // TODO: Once we hit the chain with the failure transaction we should check that we get a
6929 // PaymentFailed event
6931 assert_eq!(nodes[0].node.list_channels().len(), 0);
6935 fn test_simple_monitor_temporary_update_fail() {
6936 do_test_simple_monitor_temporary_update_fail(false);
6937 do_test_simple_monitor_temporary_update_fail(true);
6940 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
6941 let disconnect_flags = 8 | 16;
6943 // Test that we can recover from a temporary monitor update failure with some in-flight
6944 // HTLCs going on at the same time, potentially with some disconnections thrown in.
6945 // * First we route a payment, then get a temporary monitor update failure when trying to
6946 // route a second payment. We then claim the first payment.
6947 // * If disconnect_count is set, we will disconnect at this point (which is likely, as
6948 // TemporaryFailure usually indicates a network disconnect which resulted in failing to update
6949 // the ChannelMonitor on a watchtower).
6950 // * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
6951 // immediately, otherwise we wait until after the disconnect and deliver them via the reconnect
6952 // channel_reestablish processing (ie disconnect_count & 16 makes no sense if
6953 // disconnect_count & !disconnect_flags is 0).
6954 // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
6955 // through message sending, potentially disconnect/reconnecting multiple times based on
6956 // disconnect_count, to get the update_fulfill_htlc through.
6957 // * We then walk through more message exchanges to get the original update_add_htlc
6958 // through, swapping message ordering based on disconnect_count & 8 and optionally
6959 // disconnect/reconnecting based on disconnect_count.
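// In other words (per the checks below): the low bits (disconnect_count & !disconnect_flags)
// determine how many of the optional disconnect/reconnect points are actually exercised, bit 8
// swaps whether the bs_revoke_and_ack or the initial RAA is handled first, and bit 16 defers
// delivery of the initial update_fulfill_htlc/CS to the channel_reestablish path.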
6960 let mut nodes = create_network(2);
6961 create_announced_chan_between_nodes(&nodes, 0, 1);
6963 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6965 // Now try to send a second payment which will fail to send
6966 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6967 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
6969 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
6970 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
6971 check_added_monitors!(nodes[0], 1);
6973 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
6974 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6975 assert_eq!(nodes[0].node.list_channels().len(), 1);
6977 // Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
6978 // but nodes[0] won't respond since it is frozen.
6979 assert!(nodes[1].node.claim_funds(payment_preimage_1));
6980 check_added_monitors!(nodes[1], 1);
6981 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6982 assert_eq!(events_2.len(), 1);
6983 let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
6984 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6985 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6986 assert!(update_add_htlcs.is_empty());
6987 assert_eq!(update_fulfill_htlcs.len(), 1);
6988 assert!(update_fail_htlcs.is_empty());
6989 assert!(update_fail_malformed_htlcs.is_empty());
6990 assert!(update_fee.is_none());
6992 if (disconnect_count & 16) == 0 {
6993 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
6994 let events_3 = nodes[0].node.get_and_clear_pending_events();
6995 assert_eq!(events_3.len(), 1);
6997 Event::PaymentSent { ref payment_preimage } => {
6998 assert_eq!(*payment_preimage, payment_preimage_1);
7000 _ => panic!("Unexpected event"),
7003 if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
7004 assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
7005 } else { panic!(); }
7008 (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
7010 _ => panic!("Unexpected event"),
7013 if disconnect_count & !disconnect_flags > 0 {
7014 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7015 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7018 // Now fix monitor updating...
7019 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
7020 nodes[0].node.test_restore_channel_monitor();
7021 check_added_monitors!(nodes[0], 1);
7023 macro_rules! disconnect_reconnect_peers { () => { {
7024 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7025 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7027 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
7028 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7029 assert_eq!(reestablish_1.len(), 1);
7030 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
7031 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7032 assert_eq!(reestablish_2.len(), 1);
7034 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
7035 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7036 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
7037 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7039 assert!(as_resp.0.is_none());
7040 assert!(bs_resp.0.is_none());
7042 (reestablish_1, reestablish_2, as_resp, bs_resp)
7045 let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
7046 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
7047 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7049 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
7050 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7051 assert_eq!(reestablish_1.len(), 1);
7052 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
7053 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7054 assert_eq!(reestablish_2.len(), 1);
7056 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
7057 check_added_monitors!(nodes[0], 0);
7058 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7059 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
7060 check_added_monitors!(nodes[1], 0);
7061 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7063 assert!(as_resp.0.is_none());
7064 assert!(bs_resp.0.is_none());
7066 assert!(bs_resp.1.is_none());
7067 if (disconnect_count & 16) == 0 {
7068 assert!(bs_resp.2.is_none());
7070 assert!(as_resp.1.is_some());
7071 assert!(as_resp.2.is_some());
7072 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
7074 assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
7075 assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
7076 assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
7077 assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
7078 assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
7079 assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
7081 assert!(as_resp.1.is_none());
7083 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
7084 let events_3 = nodes[0].node.get_and_clear_pending_events();
7085 assert_eq!(events_3.len(), 1);
7087 Event::PaymentSent { ref payment_preimage } => {
7088 assert_eq!(*payment_preimage, payment_preimage_1);
7090 _ => panic!("Unexpected event"),
7093 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
7094 let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
7095 // No commitment_signed so get_event_msg's assert(len == 1) passes
7096 check_added_monitors!(nodes[0], 1);
7098 as_resp.1 = Some(as_resp_raa);
7102 if disconnect_count & !disconnect_flags > 1 {
7103 let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
7105 if (disconnect_count & 16) == 0 {
7106 assert!(reestablish_1 == second_reestablish_1);
7107 assert!(reestablish_2 == second_reestablish_2);
7109 assert!(as_resp == second_as_resp);
7110 assert!(bs_resp == second_bs_resp);
7113 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
7115 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
7116 assert_eq!(events_4.len(), 2);
7117 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
7118 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
7119 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
7122 _ => panic!("Unexpected event"),
7126 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
7128 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
7129 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
7130 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
7131 // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
7132 check_added_monitors!(nodes[1], 1);
7134 if disconnect_count & !disconnect_flags > 2 {
7135 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7137 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
7138 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
7140 assert!(as_resp.2.is_none());
7141 assert!(bs_resp.2.is_none());
7144 let as_commitment_update;
7145 let bs_second_commitment_update;
7147 macro_rules! handle_bs_raa { () => {
7148 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
7149 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7150 assert!(as_commitment_update.update_add_htlcs.is_empty());
7151 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
7152 assert!(as_commitment_update.update_fail_htlcs.is_empty());
7153 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
7154 assert!(as_commitment_update.update_fee.is_none());
7155 check_added_monitors!(nodes[0], 1);
7158 macro_rules! handle_initial_raa { () => {
7159 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap();
7160 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7161 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
7162 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
7163 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
7164 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
7165 assert!(bs_second_commitment_update.update_fee.is_none());
7166 check_added_monitors!(nodes[1], 1);
7169 if (disconnect_count & 8) == 0 {
7172 if disconnect_count & !disconnect_flags > 3 {
7173 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7175 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
7176 assert!(bs_resp.1.is_none());
7178 assert!(as_resp.2.unwrap() == as_commitment_update);
7179 assert!(bs_resp.2.is_none());
7181 assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
7184 handle_initial_raa!();
7186 if disconnect_count & !disconnect_flags > 4 {
7187 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7189 assert!(as_resp.1.is_none());
7190 assert!(bs_resp.1.is_none());
7192 assert!(as_resp.2.unwrap() == as_commitment_update);
7193 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
7196 handle_initial_raa!();
7198 if disconnect_count & !disconnect_flags > 3 {
7199 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7201 assert!(as_resp.1.is_none());
7202 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
7204 assert!(as_resp.2.is_none());
7205 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
7207 assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
7212 if disconnect_count & !disconnect_flags > 4 {
7213 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7215 assert!(as_resp.1.is_none());
7216 assert!(bs_resp.1.is_none());
7218 assert!(as_resp.2.unwrap() == as_commitment_update);
7219 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
7223 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
7224 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
7225 // No commitment_signed so get_event_msg's assert(len == 1) passes
7226 check_added_monitors!(nodes[0], 1);
7228 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
7229 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
7230 // No commitment_signed so get_event_msg's assert(len == 1) passes
7231 check_added_monitors!(nodes[1], 1);
7233 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
7234 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7235 check_added_monitors!(nodes[1], 1);
7237 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
7238 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7239 check_added_monitors!(nodes[0], 1);
7241 expect_pending_htlcs_forwardable!(nodes[1]);
7243 let events_5 = nodes[1].node.get_and_clear_pending_events();
7244 assert_eq!(events_5.len(), 1);
7246 Event::PaymentReceived { ref payment_hash, amt } => {
7247 assert_eq!(payment_hash_2, *payment_hash);
7248 assert_eq!(amt, 1000000);
7250 _ => panic!("Unexpected event"),
7253 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
7257 fn test_monitor_temporary_update_fail_a() {
7258 do_test_monitor_temporary_update_fail(0);
7259 do_test_monitor_temporary_update_fail(1);
7260 do_test_monitor_temporary_update_fail(2);
7261 do_test_monitor_temporary_update_fail(3);
7262 do_test_monitor_temporary_update_fail(4);
7263 do_test_monitor_temporary_update_fail(5);
7267 fn test_monitor_temporary_update_fail_b() {
7268 do_test_monitor_temporary_update_fail(2 | 8);
7269 do_test_monitor_temporary_update_fail(3 | 8);
7270 do_test_monitor_temporary_update_fail(4 | 8);
7271 do_test_monitor_temporary_update_fail(5 | 8);
7275 fn test_monitor_temporary_update_fail_c() {
7276 do_test_monitor_temporary_update_fail(1 | 16);
7277 do_test_monitor_temporary_update_fail(2 | 16);
7278 do_test_monitor_temporary_update_fail(3 | 16);
7279 do_test_monitor_temporary_update_fail(2 | 8 | 16);
7280 do_test_monitor_temporary_update_fail(3 | 8 | 16);
7284 fn test_invalid_channel_announcement() {
7285 // Test the BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
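// We first build and sign a correct announcement for the channel (accepted below), then re-sign
// copies whose chain_hash points at the mainnet genesis or at garbage bytes; the router must
// reject both, since this node is configured for Network::Testnet.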
7286 let secp_ctx = Secp256k1::new();
7287 let nodes = create_network(2);
7289 let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
7291 let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
7292 let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
7293 let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
7294 let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
7296 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
7298 let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
7299 let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
7301 let as_network_key = nodes[0].node.get_our_node_id();
7302 let bs_network_key = nodes[1].node.get_our_node_id();
7304 let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
7306 let mut chan_announcement;
7308 macro_rules! dummy_unsigned_msg {
7310 msgs::UnsignedChannelAnnouncement {
7311 features: msgs::GlobalFeatures::new(),
7312 chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
7313 short_channel_id: as_chan.get_short_channel_id().unwrap(),
7314 node_id_1: if were_node_one { as_network_key } else { bs_network_key },
7315 node_id_2: if were_node_one { bs_network_key } else { as_network_key },
7316 bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
7317 bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
7318 excess_data: Vec::new(),
7323 macro_rules! sign_msg {
7324 ($unsigned_msg: expr) => {
7325 let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
7326 let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
7327 let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
7328 let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key);
7329 let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key);
7330 chan_announcement = msgs::ChannelAnnouncement {
7331 node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
7332 node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
7333 bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
7334 bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
7335 contents: $unsigned_msg
7340 let unsigned_msg = dummy_unsigned_msg!();
7341 sign_msg!(unsigned_msg);
7342 assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
7343 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
7345 // Configured with Network::Testnet
7346 let mut unsigned_msg = dummy_unsigned_msg!();
7347 unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
7348 sign_msg!(unsigned_msg);
7349 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
7351 let mut unsigned_msg = dummy_unsigned_msg!();
7352 unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
7353 sign_msg!(unsigned_msg);
7354 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
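// Simple in-memory util::ser::Writer implementation used by the serialization round-trip tests
// below to capture the bytes ChannelMonitor::write_for_disk() produces.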
7357 struct VecWriter(Vec<u8>);
7358 impl Writer for VecWriter {
7359 fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
7360 self.0.extend_from_slice(buf);
7363 fn size_hint(&mut self, size: usize) {
7364 self.0.reserve_exact(size);
7369 fn test_no_txn_manager_serialize_deserialize() {
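// Serialize nodes[0]'s ChannelManager and its single ChannelMonitor before the funding
// transaction has confirmed, rebuild them from those bytes via ChannelManagerReadArgs, and
// check that the restored node still completes the funding flow (funding_locked, announcement)
// and can route a payment.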
7370 let mut nodes = create_network(2);
7372 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
7374 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7376 let nodes_0_serialized = nodes[0].node.encode();
7377 let mut chan_0_monitor_serialized = VecWriter(Vec::new());
7378 nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
7380 nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
7381 let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
7382 let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
7383 assert!(chan_0_monitor_read.is_empty());
7385 let mut nodes_0_read = &nodes_0_serialized[..];
7386 let config = UserConfig::new();
7387 let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
7388 let (_, nodes_0_deserialized) = {
7389 let mut channel_monitors = HashMap::new();
7390 channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
7391 <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
7392 default_config: config,
7393 keys_manager,
7394 fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
7395 monitor: nodes[0].chan_monitor.clone(),
7396 chain_monitor: nodes[0].chain_monitor.clone(),
7397 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
7398 logger: Arc::new(test_utils::TestLogger::new()),
7399 channel_monitors: &channel_monitors,
7402 assert!(nodes_0_read.is_empty());
7404 assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
7405 nodes[0].node = Arc::new(nodes_0_deserialized);
7406 let nodes_0_as_listener: Arc<ChainListener> = nodes[0].node.clone();
7407 nodes[0].chain_monitor.register_listener(Arc::downgrade(&nodes_0_as_listener));
7408 assert_eq!(nodes[0].node.list_channels().len(), 1);
7409 check_added_monitors!(nodes[0], 1);
7411 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
7412 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7413 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
7414 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7416 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
7417 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7418 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
7419 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7421 let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
7422 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
7423 for node in nodes.iter() {
7424 assert!(node.router.handle_channel_announcement(&announcement).unwrap());
7425 node.router.handle_channel_update(&as_update).unwrap();
7426 node.router.handle_channel_update(&bs_update).unwrap();
7429 send_payment(&nodes[0], &[&nodes[1]], 1000000);
7433 fn test_simple_manager_serialize_deserialize() {
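// Same round-trip as above, but with an open announced channel and two pending HTLCs; after
// reloading nodes[0] from its serialized ChannelManager/ChannelMonitor we must still be able to
// fail one payment and claim the other.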
7434 let mut nodes = create_network(2);
7435 create_announced_chan_between_nodes(&nodes, 0, 1);
7437 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7438 let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7440 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7442 let nodes_0_serialized = nodes[0].node.encode();
7443 let mut chan_0_monitor_serialized = VecWriter(Vec::new());
7444 nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
7446 nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
7447 let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
7448 let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
7449 assert!(chan_0_monitor_read.is_empty());
7451 let mut nodes_0_read = &nodes_0_serialized[..];
7452 let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
7453 let (_, nodes_0_deserialized) = {
7454 let mut channel_monitors = HashMap::new();
7455 channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
7456 <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
7457 default_config: UserConfig::new(),
7458 keys_manager,
7459 fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
7460 monitor: nodes[0].chan_monitor.clone(),
7461 chain_monitor: nodes[0].chain_monitor.clone(),
7462 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
7463 logger: Arc::new(test_utils::TestLogger::new()),
7464 channel_monitors: &channel_monitors,
7467 assert!(nodes_0_read.is_empty());
7469 assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
7470 nodes[0].node = Arc::new(nodes_0_deserialized);
7471 check_added_monitors!(nodes[0], 1);
7473 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
7475 fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
7476 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
7480 fn test_manager_serialize_deserialize_inconsistent_monitor() {
7481 // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
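// nodes[0]'s ChannelManager is serialized before routing a payment to nodes[3], but its
// ChannelMonitors are serialized afterwards, so on deserialization the nodes[3] channel is
// detected as stale and force-closed (commitment + HTLC tx broadcast below), while the channels
// with nodes[1] and nodes[2] reconnect cleanly and nodes[3]'s channel_reestablish is answered
// with an error message.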
7482 let mut nodes = create_network(4);
7483 create_announced_chan_between_nodes(&nodes, 0, 1);
7484 create_announced_chan_between_nodes(&nodes, 2, 0);
7485 let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3);
7487 let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
7489 // Serialize the ChannelManager here, but keep the monitor up-to-date
7490 let nodes_0_serialized = nodes[0].node.encode();
7492 route_payment(&nodes[0], &[&nodes[3]], 1000000);
7493 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7494 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7495 nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7497 // Now serialize the ChannelMonitors (which are out-of-sync with the ChannelManager for the channel w/ nodes[3])
7499 let mut node_0_monitors_serialized = Vec::new();
7500 for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
7501 let mut writer = VecWriter(Vec::new());
7502 monitor.1.write_for_disk(&mut writer).unwrap();
7503 node_0_monitors_serialized.push(writer.0);
7506 nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
7507 let mut node_0_monitors = Vec::new();
7508 for serialized in node_0_monitors_serialized.iter() {
7509 let mut read = &serialized[..];
7510 let (_, monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut read, Arc::new(test_utils::TestLogger::new())).unwrap();
7511 assert!(read.is_empty());
7512 node_0_monitors.push(monitor);
7515 let mut nodes_0_read = &nodes_0_serialized[..];
7516 let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
7517 let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
7518 default_config: UserConfig::new(),
7519 keys_manager,
7520 fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
7521 monitor: nodes[0].chan_monitor.clone(),
7522 chain_monitor: nodes[0].chain_monitor.clone(),
7523 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
7524 logger: Arc::new(test_utils::TestLogger::new()),
7525 channel_monitors: &node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().unwrap(), monitor) }).collect(),
7527 assert!(nodes_0_read.is_empty());
7529 { // Channel close should result in a commitment tx and an HTLC tx
7530 let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7531 assert_eq!(txn.len(), 2);
7532 assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
7533 assert_eq!(txn[1].input[0].previous_output.txid, txn[0].txid());
7536 for monitor in node_0_monitors.drain(..) {
7537 assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
7538 check_added_monitors!(nodes[0], 1);
7540 nodes[0].node = Arc::new(nodes_0_deserialized);
7542 // nodes[1] and nodes[2] have no lost state with nodes[0]...
7543 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
7544 reconnect_nodes(&nodes[0], &nodes[2], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
7545 //... and we can even still claim the payment!
7546 claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
7548 nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
7549 let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
7550 nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
7551 if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
7552 assert_eq!(msg.channel_id, channel_id);
7553 } else { panic!("Unexpected result"); }