//! The top-level channel management and payment tracking stuff lives here.
//!
//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, which HTLCs are in flight, and for
//! reestablishing those channels upon reconnect to the relevant peer(s).
//!
//! It does not manage routing logic (see ln::router for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use bitcoin::util::hash::{BitcoinHash, Sha256dHash};

use secp256k1::key::{SecretKey,PublicKey};
use secp256k1::{Secp256k1,Message};
use secp256k1::ecdh::SharedSecret;
use secp256k1;

use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
use ln::router::{Route,RouteHop};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
use chain::keysinterface::KeysInterface;
use util::config::UserConfig;
use util::{byte_utils, events, internal_traits, rng};
use util::sha2::Sha256;
use util::ser::{Readable, ReadableArgs, Writeable, Writer};
use util::chacha20poly1305rfc::ChaCha20;
use util::logger::Logger;
use util::errors::APIError;

use crypto::mac::{Mac,MacResult};
use crypto::hmac::Hmac;
use crypto::digest::Digest;
use crypto::symmetriccipher::SynchronousStreamCipher;

use std::{cmp, ptr, mem};
use std::collections::{HashMap, hash_map, HashSet};
use std::io::Cursor;
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Instant,Duration};

/// We hold various information about HTLC relay in the HTLC objects in Channel itself:
///
/// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating whether it
/// should forward the HTLC (with the information the Channel will give back to us when it does
/// so), or fail the HTLC (with the relevant message for the Channel to hand to the remote peer).
///
/// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
/// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
/// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
/// the HTLC backwards along the relevant path).
/// Alternatively, we can fill an outbound HTLC with an HTLCSource::OutboundRoute indicating this is
/// our payment, which we can use to decode errors or inform the user that the payment was sent.
mod channel_held_info {
	use ln::msgs;
	use ln::router::Route;
	use secp256k1::key::SecretKey;

	/// Stores the info we will need to send when we want to forward an HTLC onwards
	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
	pub struct PendingForwardHTLCInfo {
		pub(super) onion_packet: Option<msgs::OnionPacket>,
		pub(super) incoming_shared_secret: [u8; 32],
		pub(super) payment_hash: [u8; 32],
		pub(super) short_channel_id: u64,
		pub(super) amt_to_forward: u64,
		pub(super) outgoing_cltv_value: u32,
	}

	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
	pub enum HTLCFailureMsg {
		Relay(msgs::UpdateFailHTLC),
		Malformed(msgs::UpdateFailMalformedHTLC),
	}

	/// Stores whether an HTLC should be failed or, if we can forward it, the relevant forwarding info
	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
	pub enum PendingHTLCStatus {
		Forward(PendingForwardHTLCInfo),
		Fail(HTLCFailureMsg),
	}

	/// Tracks the inbound HTLC corresponding to an outbound HTLC we forwarded
	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
	pub struct HTLCPreviousHopData {
		pub(super) short_channel_id: u64,
		pub(super) htlc_id: u64,
		pub(super) incoming_packet_shared_secret: [u8; 32],
	}

	/// Tracks the source of an outbound HTLC: either a previous hop we are relaying from, or a
	/// payment we originated ourselves
	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
	pub enum HTLCSource {
		PreviousHopData(HTLCPreviousHopData),
		OutboundRoute {
			route: Route,
			session_priv: SecretKey,
			/// Technically we can recalculate this from the route, but we cache it here to avoid
			/// doing a double-pass on route when we get a failure back
			first_hop_htlc_msat: u64,
		},
	}
	#[cfg(test)]
	impl HTLCSource {
		pub fn dummy() -> Self {
			HTLCSource::OutboundRoute {
				route: Route { hops: Vec::new() },
				session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
				first_hop_htlc_msat: 0,
			}
		}
	}

	#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
	pub(crate) enum HTLCFailReason {
		ErrorPacket {
			err: msgs::OnionErrorPacket,
		},
		Reason {
			failure_code: u16,
			data: Vec<u8>,
		}
	}
}
pub(super) use self::channel_held_info::*;

struct MsgHandleErrInternal {
	err: msgs::HandleError,
	needs_channel_force_close: bool,
}
impl MsgHandleErrInternal {
	#[inline]
	fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
		Self {
			err: HandleError {
				err,
				action: Some(msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err.to_string()
					},
				}),
			},
			needs_channel_force_close: false,
		}
	}
	#[inline]
	fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
		Self {
			err: HandleError {
				err,
				action: Some(msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err.to_string()
					},
				}),
			},
			needs_channel_force_close: true,
		}
	}
	#[inline]
	fn from_maybe_close(err: msgs::HandleError) -> Self {
		Self { err, needs_channel_force_close: true }
	}
	#[inline]
	fn from_no_close(err: msgs::HandleError) -> Self {
		Self { err, needs_channel_force_close: false }
	}
	#[inline]
	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
		Self {
			err: match err {
				ChannelError::Ignore(msg) => HandleError {
					err: msg,
					action: Some(msgs::ErrorAction::IgnoreError),
				},
				ChannelError::Close(msg) => HandleError {
					err: msg,
					action: Some(msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg.to_string()
						},
					}),
				},
			},
			needs_channel_force_close: false,
		}
	}
	#[inline]
	fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
		Self {
			err: match err {
				ChannelError::Ignore(msg) => HandleError {
					err: msg,
					action: Some(msgs::ErrorAction::IgnoreError),
				},
				ChannelError::Close(msg) => HandleError {
					err: msg,
					action: Some(msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg.to_string()
						},
					}),
				},
			},
			needs_channel_force_close: true,
		}
	}
}

/// Pass to fail_htlc_backwards to indicate the reason to fail the payment
/// after a PaymentReceived event.
#[derive(Debug, PartialEq)]
pub enum PaymentFailReason {
	/// Indicate the preimage for payment_hash is not known after a PaymentReceived event
	PreimageUnknown,
	/// Indicate the payment amount is incorrect (received is < expected or > 2*expected) after a PaymentReceived event
	AmountMismatch,
}

/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
/// probably increase this significantly.
const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
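
// As an illustrative sketch only (the actual sampling happens where forwards get scheduled,
// not here), a hold target in the (this, 5*this) range could be drawn as:
//   let hold_millis = MIN_HTLC_RELAY_HOLDING_CELL_MILLIS * (1 + rand_in_0_to_4);
//   let next_forward = Instant::now() + Duration::from_millis(hold_millis as u64);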

struct HTLCForwardInfo {
	prev_short_channel_id: u64,
	prev_htlc_id: u64,
	forward_info: PendingForwardHTLCInfo,
}

/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value; however, sometimes the order needs to be
/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
/// they were originally sent). In those cases, this enum is also returned.
#[derive(Clone, PartialEq)]
pub(super) enum RAACommitmentOrder {
	/// Send the CommitmentUpdate messages first
	CommitmentFirst,
	/// Send the RevokeAndACK message first
	RevokeAndACKFirst,
}

struct ChannelHolder {
	by_id: HashMap<[u8; 32], Channel>,
	short_to_id: HashMap<u64, [u8; 32]>,
	next_forward: Instant,
	/// short channel id -> forward infos. Key of 0 means payments received
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
	/// guarantees are made about there existing a channel with the short id here, nor the short
	/// ids in the PendingForwardHTLCInfo!
	forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
	/// guarantees are made about the channels given here actually existing anymore by the time you
	/// go to read them!
	claimable_htlcs: HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
	/// for broadcast messages, where ordering isn't as strict).
	pending_msg_events: Vec<events::MessageSendEvent>,
}
struct MutChannelHolder<'a> {
	by_id: &'a mut HashMap<[u8; 32], Channel>,
	short_to_id: &'a mut HashMap<u64, [u8; 32]>,
	next_forward: &'a mut Instant,
	forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
	claimable_htlcs: &'a mut HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
	pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
}
impl ChannelHolder {
	fn borrow_parts(&mut self) -> MutChannelHolder {
		MutChannelHolder {
			by_id: &mut self.by_id,
			short_to_id: &mut self.short_to_id,
			next_forward: &mut self.next_forward,
			forward_htlcs: &mut self.forward_htlcs,
			claimable_htlcs: &mut self.claimable_htlcs,
			pending_msg_events: &mut self.pending_msg_events,
		}
	}
}

#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";

/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
/// to individual Channels.
///
/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
/// all peers during write/read (though does not modify this instance, only the instance being
/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
/// called funding_transaction_generated for outbound channels) being closed.
///
/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
/// returning from ManyChannelMonitor::add_update_monitor; with ChannelManagers, writing updates
/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
///
/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
/// block_connected() to step towards your best block) upon deserialization before using the
/// ChannelManager.
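///
/// A rough sketch of that reorg walk (the `read_manager` and chain-walk helpers here are
/// hypothetical stand-ins, not APIs of this crate):
///
/// ```ignore
/// let (last_block_hash, manager) = read_manager(&mut reader)?;
/// // Unwind from last_block_hash back to a block on our best chain...
/// for header in headers_back_to_fork(last_block_hash) {
///     manager.block_disconnected(&header);
/// }
/// // ...then replay blocks forward to the current tip before using the manager.
/// for (header, height, txn_matched, indexes) in blocks_forward_to_tip() {
///     manager.block_connected(&header, height, &txn_matched, &indexes);
/// }
/// ```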
pub struct ChannelManager {
	default_configuration: UserConfig,
	genesis_hash: Sha256dHash,
	fee_estimator: Arc<FeeEstimator>,
	monitor: Arc<ManyChannelMonitor>,
	chain_monitor: Arc<ChainWatchInterface>,
	tx_broadcaster: Arc<BroadcasterInterface>,

	latest_block_height: AtomicUsize,
	last_block_hash: Mutex<Sha256dHash>,
	secp_ctx: Secp256k1<secp256k1::All>,

	channel_state: Mutex<ChannelHolder>,
	our_network_key: SecretKey,

	pending_events: Mutex<Vec<events::Event>>,
	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
	/// Essentially just when we're serializing ourselves out.
	/// Taken first everywhere where we are making changes before any other locks.
	total_consistency_lock: RwLock<()>,

	keys_manager: Arc<KeysInterface>,

	logger: Arc<Logger>,
}

/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
/// ie the node we forwarded the payment on to should always have enough room to reliably time out
/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?

// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the
// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER;

// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
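
// (The two consts above are the "static asserts" referenced in the CLTV_EXPIRY_DELTA docs: if
// either subtraction underflows, evaluating the constant fails at compile time, so a successful
// build implies the inequalities hold.)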

macro_rules! secp_call {
	( $res: expr, $err: expr ) => {
		match $res {
			Ok(key) => key,
			Err(_) => return Err($err),
		}
	};
}

struct OnionKeys {
	#[cfg(test)]
	shared_secret: SharedSecret,
	#[cfg(test)]
	blinding_factor: [u8; 32],
	ephemeral_pubkey: PublicKey,
	rho: [u8; 32],
	mu: [u8; 32],
}

/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
pub struct ChannelDetails {
	/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
	/// thereafter this is the txid of the funding transaction xor the funding transaction output).
	/// Note that this means this value is *not* persistent - it can change once during the
	/// lifetime of the channel.
	pub channel_id: [u8; 32],
	/// The position of the funding transaction in the chain. None if the funding transaction has
	/// not yet been confirmed and the channel fully opened.
	pub short_channel_id: Option<u64>,
	/// The node_id of our counterparty
	pub remote_network_id: PublicKey,
	/// The value, in satoshis, of this channel as it appears in the funding output
	pub channel_value_satoshis: u64,
	/// The user_id passed in to create_channel, or 0 if the channel was inbound.
	pub user_id: u64,
}

impl ChannelManager {
	/// Constructs a new ChannelManager to hold several channels and route between them.
	///
	/// This is the main "logic hub" for all channel-related actions, and implements
	/// ChannelMessageHandler.
	///
	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
	///
	/// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
	pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>, config: UserConfig) -> Result<Arc<ChannelManager>, secp256k1::Error> {
		let secp_ctx = Secp256k1::new();

		let res = Arc::new(ChannelManager {
			default_configuration: config.clone(),
			genesis_hash: genesis_block(network).header.bitcoin_hash(),
			fee_estimator: feeest.clone(),
			monitor: monitor.clone(),
			chain_monitor,
			tx_broadcaster,

			latest_block_height: AtomicUsize::new(0), //TODO: Get an init value
			last_block_hash: Mutex::new(Default::default()),
			secp_ctx,

			channel_state: Mutex::new(ChannelHolder{
				by_id: HashMap::new(),
				short_to_id: HashMap::new(),
				next_forward: Instant::now(),
				forward_htlcs: HashMap::new(),
				claimable_htlcs: HashMap::new(),
				pending_msg_events: Vec::new(),
			}),
			our_network_key: keys_manager.get_node_secret(),

			pending_events: Mutex::new(Vec::new()),
			total_consistency_lock: RwLock::new(()),

			keys_manager,

			logger,
		});
		let weak_res = Arc::downgrade(&res);
		res.chain_monitor.register_listener(weak_res);
		Ok(res)
	}

	/// Creates a new outbound channel to the given remote node and with the given value.
	///
	/// user_id will be provided back as user_channel_id in FundingGenerationReady and
	/// FundingBroadcastSafe events to allow tracking of which events correspond with which
	/// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
	/// may wish to avoid using 0 for user_id here.
	///
	/// If successful, will generate a SendOpenChannel message event, so you should probably poll
	/// PeerManager::process_events afterwards.
	///
	/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
	/// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
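	///
	/// A minimal call sketch (values are illustrative; their_network_key is the peer's node
	/// pubkey):
	///
	/// ```ignore
	/// // 100_000 sat channel, pushing 10_000 msat to the peer, tagged with user_id 42:
	/// channel_manager.create_channel(their_network_key, 100_000, 10_000, 42)?;
	/// // Now poll PeerManager::process_events so the SendOpenChannel actually goes out.
	/// ```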
	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
		if channel_value_satoshis < 1000 {
			return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
		}

		let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?;
		let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);

		let _ = self.total_consistency_lock.read().unwrap();
		let mut channel_state = self.channel_state.lock().unwrap();
		match channel_state.by_id.entry(channel.channel_id()) {
			hash_map::Entry::Occupied(_) => {
				if cfg!(feature = "fuzztarget") {
					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
				}
				panic!("RNG is bad???");
			},
			hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
		}
		channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
			node_id: their_network_key,
			msg: res,
		});
		Ok(())
	}

	/// Gets the list of open channels, in random order. See ChannelDetails field documentation for
	/// more information.
	pub fn list_channels(&self) -> Vec<ChannelDetails> {
		let channel_state = self.channel_state.lock().unwrap();
		let mut res = Vec::with_capacity(channel_state.by_id.len());
		for (channel_id, channel) in channel_state.by_id.iter() {
			res.push(ChannelDetails {
				channel_id: (*channel_id).clone(),
				short_channel_id: channel.get_short_channel_id(),
				remote_network_id: channel.get_their_node_id(),
				channel_value_satoshis: channel.get_value_satoshis(),
				user_id: channel.get_user_id(),
			});
		}
		res
	}

	/// Gets the list of usable channels, in random order. Useful as an argument to
	/// Router::get_route to ensure non-announced channels are used.
	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
		let channel_state = self.channel_state.lock().unwrap();
		let mut res = Vec::with_capacity(channel_state.by_id.len());
		for (channel_id, channel) in channel_state.by_id.iter() {
			// Note we use is_live here instead of usable which leads to somewhat confused
			// internal/external nomenclature, but that's ok cause that's probably what the user
			// really wanted anyway.
			if channel.is_live() {
				res.push(ChannelDetails {
					channel_id: (*channel_id).clone(),
					short_channel_id: channel.get_short_channel_id(),
					remote_network_id: channel.get_their_node_id(),
					channel_value_satoshis: channel.get_value_satoshis(),
					user_id: channel.get_user_id(),
				});
			}
		}
		res
	}

	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
	/// will be accepted on the given channel, and after additional timeout/the closing of all
	/// pending HTLCs, the channel will be closed on chain.
	///
	/// May generate a SendShutdown message event on success, which should be relayed.
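	///
	/// Call sketch (illustrative):
	///
	/// ```ignore
	/// channel_manager.close_channel(&channel_id)?;
	/// // Then drive PeerManager::process_events so the SendShutdown actually goes out.
	/// ```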
	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
		let _ = self.total_consistency_lock.read().unwrap();

		let (mut failed_htlcs, chan_option) = {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = channel_state_lock.borrow_parts();
			match channel_state.by_id.entry(channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_entry) => {
					let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
						node_id: chan_entry.get().get_their_node_id(),
						msg: shutdown_msg
					});
					if chan_entry.get().is_shutdown() {
						if let Some(short_id) = chan_entry.get().get_short_channel_id() {
							channel_state.short_to_id.remove(&short_id);
						}
						(failed_htlcs, Some(chan_entry.remove_entry().1))
					} else { (failed_htlcs, None) }
				},
				hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
			}
		};
		for htlc_source in failed_htlcs.drain(..) {
			// unknown_next_peer...I dunno who that is anymore....
			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
		}
		let chan_update = if let Some(chan) = chan_option {
			if let Ok(update) = self.get_channel_update(&chan) {
				Some(update)
			} else { None }
		} else { None };

		if let Some(update) = chan_update {
			let mut channel_state = self.channel_state.lock().unwrap();
			channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
				msg: update
			});
		}

		Ok(())
	}

	#[inline]
	fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>)) {
		let (local_txn, mut failed_htlcs) = shutdown_res;
		for htlc_source in failed_htlcs.drain(..) {
			// unknown_next_peer...I dunno who that is anymore....
			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
		}
		for tx in local_txn {
			self.tx_broadcaster.broadcast_transaction(&tx);
		}
		//TODO: We need to have a way where outbound HTLC claims can result in us claiming the
		//now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
		//TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
		//may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
		//timeouts are hit and our claims confirm).
		//TODO: In any case, we need to make sure we remove any pending htlc tracking (via
		//fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
	}

	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
	/// the chain and rejecting new HTLCs on the given channel.
	pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
		let _ = self.total_consistency_lock.read().unwrap();

		let mut chan = {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = channel_state_lock.borrow_parts();
			if let Some(chan) = channel_state.by_id.remove(channel_id) {
				if let Some(short_id) = chan.get_short_channel_id() {
					channel_state.short_to_id.remove(&short_id);
				}
				chan
			} else {
				return;
			}
		};
		self.finish_force_close_channel(chan.force_shutdown());
		if let Ok(update) = self.get_channel_update(&chan) {
			let mut channel_state = self.channel_state.lock().unwrap();
			channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
				msg: update
			});
		}
	}

	/// Force close all channels, immediately broadcasting the latest local commitment transaction
	/// for each to the chain and rejecting new HTLCs on each.
	pub fn force_close_all_channels(&self) {
		for chan in self.list_channels() {
			self.force_close_channel(&chan.channel_id);
		}
	}

	fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
		match err {
			ChannelMonitorUpdateErr::PermanentFailure => {
				let mut chan = {
					let channel_state = channel_state_lock.borrow_parts();
					let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
					if let Some(short_id) = chan.get_short_channel_id() {
						channel_state.short_to_id.remove(&short_id);
					}
					chan
				};
				mem::drop(channel_state_lock);
				self.finish_force_close_channel(chan.force_shutdown());
				if let Ok(update) = self.get_channel_update(&chan) {
					let mut channel_state = self.channel_state.lock().unwrap();
					channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
						msg: update
					});
				}
			},
			ChannelMonitorUpdateErr::TemporaryFailure => {
				let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
				channel.monitor_update_failed(reason);
			},
		}
	}

	#[inline]
	fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
		assert_eq!(shared_secret.len(), 32);
		({
			let mut hmac = Hmac::new(Sha256::new(), &[0x72, 0x68, 0x6f]); // rho
			hmac.input(&shared_secret[..]);
			let mut res = [0; 32];
			hmac.raw_result(&mut res);
			res
		},
		{
			let mut hmac = Hmac::new(Sha256::new(), &[0x6d, 0x75]); // mu
			hmac.input(&shared_secret[..]);
			let mut res = [0; 32];
			hmac.raw_result(&mut res);
			res
		})
	}

	#[inline]
	fn gen_um_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
		assert_eq!(shared_secret.len(), 32);
		let mut hmac = Hmac::new(Sha256::new(), &[0x75, 0x6d]); // um
		hmac.input(&shared_secret[..]);
		let mut res = [0; 32];
		hmac.raw_result(&mut res);
		res
	}

	#[inline]
	fn gen_ammag_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
		assert_eq!(shared_secret.len(), 32);
		let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
		hmac.input(&shared_secret[..]);
		let mut res = [0; 32];
		hmac.raw_result(&mut res);
		res
	}
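
	// (The helpers above are the BOLT #4 key derivation: each key is
	// HMAC-SHA256(key_type_string, shared_secret), with "rho"/"mu" used for onion packet
	// encryption/HMACs and "um"/"ammag" for failure packets.)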

	// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
	#[inline]
	fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(SharedSecret, [u8; 32], PublicKey, &RouteHop)>(secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey, mut callback: FType) -> Result<(), secp256k1::Error> {
		let mut blinded_priv = session_priv.clone();
		let mut blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);

		for hop in route.hops.iter() {
			let shared_secret = SharedSecret::new(secp_ctx, &hop.pubkey, &blinded_priv);

			let mut sha = Sha256::new();
			sha.input(&blinded_pub.serialize()[..]);
			sha.input(&shared_secret[..]);
			let mut blinding_factor = [0u8; 32];
			sha.result(&mut blinding_factor);

			let ephemeral_pubkey = blinded_pub;

			blinded_priv.mul_assign(secp_ctx, &SecretKey::from_slice(secp_ctx, &blinding_factor)?)?;
			blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);

			callback(shared_secret, blinding_factor, ephemeral_pubkey, hop);
		}

		Ok(())
	}

	// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
	fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, secp256k1::Error> {
		let mut res = Vec::with_capacity(route.hops.len());

		Self::construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _| {
			let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret[..]);

			res.push(OnionKeys {
				#[cfg(test)]
				shared_secret,
				#[cfg(test)]
				blinding_factor: _blinding_factor,
				ephemeral_pubkey,
				rho,
				mu,
			});
		})?;

		Ok(res)
	}

	/// Returns the hop data, as well as the first-hop value_msat and CLTV value we should send.
	fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
		let mut cur_value_msat = 0u64;
		let mut cur_cltv = starting_htlc_offset;
		let mut last_short_channel_id = 0;
		let mut res: Vec<msgs::OnionHopData> = Vec::with_capacity(route.hops.len());
		internal_traits::test_no_dealloc::<msgs::OnionHopData>(None);
		unsafe { res.set_len(route.hops.len()); }

		for (idx, hop) in route.hops.iter().enumerate().rev() {
			// First hop gets special values so that it can check, on receipt, that everything is
			// exactly as it should be (and the next hop isn't trying to probe to find out if we're
			// the intended recipient).
			let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
			let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
			res[idx] = msgs::OnionHopData {
				realm: 0,
				data: msgs::OnionRealm0HopData {
					short_channel_id: last_short_channel_id,
					amt_to_forward: value_msat,
					outgoing_cltv_value: cltv,
				},
				hmac: [0; 32],
			};
			cur_value_msat += hop.fee_msat;
			if cur_value_msat >= 21000000 * 100000000 * 1000 {
				return Err(APIError::RouteError{err: "Channel fees overflowed?!"});
			}
			cur_cltv += hop.cltv_expiry_delta as u32;
			if cur_cltv >= 500000000 {
				return Err(APIError::RouteError{err: "Channel CLTV overflowed?!"});
			}
			last_short_channel_id = hop.short_channel_id;
		}
		Ok((res, cur_value_msat, cur_cltv))
	}
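
	// Worked example for build_onion_payloads (illustrative numbers): for a route A -> B -> C
	// paying 1000 msat to C, where hop B's fee_msat is 10 and hop C's fee_msat carries the final
	// 1000, the reversed pass fills C's payload first (amt_to_forward = 1000, short_channel_id = 0
	// marking the final hop), then B's (amt_to_forward = 1000, short_channel_id = the B->C
	// channel), and returns value_msat = 1010 for the A->B HTLC.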

	#[inline]
	fn shift_arr_right(arr: &mut [u8; 20*65]) {
		unsafe {
			ptr::copy(arr[0..].as_ptr(), arr[65..].as_mut_ptr(), 19*65);
		}
		for i in 0..65 {
			arr[i] = 0;
		}
	}

	#[inline]
	fn xor_bufs(dst: &mut[u8], src: &[u8]) {
		assert_eq!(dst.len(), src.len());

		for i in 0..dst.len() {
			dst[i] ^= src[i];
		}
	}

	const ZERO: [u8; 21*65] = [0; 21*65];
	fn construct_onion_packet(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, associated_data: &[u8; 32]) -> msgs::OnionPacket {
		let mut buf = Vec::with_capacity(21*65);
		buf.resize(21*65, 0);

		let filler = {
			let iters = payloads.len() - 1;
			let end_len = iters * 65;
			let mut res = Vec::with_capacity(end_len);
			res.resize(end_len, 0);

			for (i, keys) in onion_keys.iter().enumerate() {
				if i == payloads.len() - 1 { continue; }
				let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
				chacha.process(&ChannelManager::ZERO, &mut buf); // We don't have a seek function :(
				ChannelManager::xor_bufs(&mut res[0..(i + 1)*65], &buf[(20 - i)*65..21*65]);
			}
			res
		};

		let mut packet_data = [0; 20*65];
		let mut hmac_res = [0; 32];

		for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
			ChannelManager::shift_arr_right(&mut packet_data);
			payload.hmac = hmac_res;
			packet_data[0..65].copy_from_slice(&payload.encode()[..]);

			let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
			chacha.process(&packet_data, &mut buf[0..20*65]);
			packet_data[..].copy_from_slice(&buf[0..20*65]);

			if i == payloads.len() - 1 {
				packet_data[20*65 - filler.len()..20*65].copy_from_slice(&filler[..]);
			}

			let mut hmac = Hmac::new(Sha256::new(), &keys.mu);
			hmac.input(&packet_data);
			hmac.input(&associated_data[..]);
			hmac.raw_result(&mut hmac_res);
		}

		msgs::OnionPacket {
			version: 0,
			public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
			hop_data: packet_data,
			hmac: hmac_res,
		}
	}
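
	// (construct_onion_packet wraps payloads back-to-front per BOLT #4: each iteration shifts the
	// next payload in, encrypts the whole 20*65-byte body with that hop's rho-keyed ChaCha20
	// stream, and computes an HMAC under mu; the precomputed filler patches the final hop's view
	// so its HMAC still verifies despite the fixed packet length.)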

	/// Encrypts a failure packet. raw_packet can either be a
	/// msgs::DecodedOnionErrorPacket.encode() result or a msgs::OnionErrorPacket.data element.
	fn encrypt_failure_packet(shared_secret: &[u8], raw_packet: &[u8]) -> msgs::OnionErrorPacket {
		let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);

		let mut packet_crypted = Vec::with_capacity(raw_packet.len());
		packet_crypted.resize(raw_packet.len(), 0);
		let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
		chacha.process(&raw_packet, &mut packet_crypted[..]);
		msgs::OnionErrorPacket {
			data: packet_crypted,
		}
	}

	fn build_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket {
		assert_eq!(shared_secret.len(), 32);
		assert!(failure_data.len() <= 256 - 2);

		let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);

		let failuremsg = {
			let mut res = Vec::with_capacity(2 + failure_data.len());
			res.push(((failure_type >> 8) & 0xff) as u8);
			res.push(((failure_type >> 0) & 0xff) as u8);
			res.extend_from_slice(&failure_data[..]);
			res
		};
		let pad = {
			let mut res = Vec::with_capacity(256 - 2 - failure_data.len());
			res.resize(256 - 2 - failure_data.len(), 0);
			res
		};
		let mut packet = msgs::DecodedOnionErrorPacket {
			hmac: [0; 32],
			failuremsg: failuremsg,
			pad: pad,
		};

		let mut hmac = Hmac::new(Sha256::new(), &um);
		hmac.input(&packet.encode()[32..]);
		hmac.raw_result(&mut packet.hmac);

		packet
	}

	#[inline]
	fn build_first_hop_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
		let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data);
		ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
	}
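
	// (Failure packets, as built above: [u16 failure_type || failure_data], zero-padded so
	// failuremsg plus pad total 256 bytes, HMAC'd under the um key; each hop on the return path,
	// including us via encrypt_failure_packet, then adds an ammag-keyed ChaCha20 layer.)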

	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
		macro_rules! get_onion_hash {
			() => {
				{
					let mut sha = Sha256::new();
					sha.input(&msg.onion_routing_packet.hop_data);
					let mut onion_hash = [0; 32];
					sha.result(&mut onion_hash);
					onion_hash
				}
			}
		}

		if let Err(_) = msg.onion_routing_packet.public_key {
			log_info!(self, "Failed to accept/forward incoming HTLC with invalid ephemeral pubkey");
			return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
				channel_id: msg.channel_id,
				htlc_id: msg.htlc_id,
				sha256_of_onion: get_onion_hash!(),
				failure_code: 0x8000 | 0x4000 | 6,
			})), self.channel_state.lock().unwrap());
		}

		let shared_secret = {
			let mut arr = [0; 32];
			arr.copy_from_slice(&SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
			arr
		};
		let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);

		let mut channel_state = None;
		macro_rules! return_err {
			($msg: expr, $err_code: expr, $data: expr) => {
				{
					log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
					if channel_state.is_none() {
						channel_state = Some(self.channel_state.lock().unwrap());
					}
					return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
						channel_id: msg.channel_id,
						htlc_id: msg.htlc_id,
						reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
					})), channel_state.unwrap());
				}
			}
		}

		if msg.onion_routing_packet.version != 0 {
			//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
			//sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
			//the hash doesn't really serve any purpose - in the case of hashing all data, the
			//receiving node would have to brute force to figure out which version was put in the
			//packet by the node that sent us the message. In the case of hashing the hop_data, the
			//node knows the HMAC matched, so they already know what is there...
			return_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4, &get_onion_hash!());
		}

		let mut hmac = Hmac::new(Sha256::new(), &mu);
		hmac.input(&msg.onion_routing_packet.hop_data);
		hmac.input(&msg.payment_hash);
		if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) {
			return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!());
		}

		let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
		let next_hop_data = {
			let mut decoded = [0; 65];
			chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
			match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
				Err(err) => {
					let error_code = match err {
						msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
						_ => 0x2000 | 2, // Should never happen
					};
					return_err!("Unable to decode our hop data", error_code, &[0;0]);
				},
				Ok(msg) => msg
			}
		};

		let pending_forward_info = if next_hop_data.hmac == [0; 32] {
				// OUR PAYMENT!
				// final_expiry_too_soon
				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
					return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
				}
				// final_incorrect_htlc_amount
				if next_hop_data.data.amt_to_forward > msg.amount_msat {
					return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
				}
				// final_incorrect_cltv_expiry
				if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
					return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
				}

				// Note that we could obviously respond immediately with an update_fulfill_htlc
				// message, however that would leak that we are the recipient of this payment, so
				// instead we stay symmetric with the forwarding case, only responding (after a
				// delay) once they've sent us a commitment_signed!

				PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
					onion_packet: None,
					payment_hash: msg.payment_hash.clone(),
					short_channel_id: 0,
					incoming_shared_secret: shared_secret,
					amt_to_forward: next_hop_data.data.amt_to_forward,
					outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
				})
			} else {
				let mut new_packet_data = [0; 20*65];
				chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
				chacha.process(&ChannelManager::ZERO[0..65], &mut new_packet_data[19*65..]);

				let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();

				let blinding_factor = {
					let mut sha = Sha256::new();
					sha.input(&new_pubkey.serialize()[..]);
					sha.input(&shared_secret);
					let mut res = [0u8; 32];
					sha.result(&mut res);
					match SecretKey::from_slice(&self.secp_ctx, &res) {
						Err(_) => {
							return_err!("Blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
						},
						Ok(key) => key
					}
				};

				if let Err(_) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
					return_err!("New blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
				}

				let outgoing_packet = msgs::OnionPacket {
					version: 0,
					public_key: Ok(new_pubkey),
					hop_data: new_packet_data,
					hmac: next_hop_data.hmac.clone(),
				};

				PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
					onion_packet: Some(outgoing_packet),
					payment_hash: msg.payment_hash.clone(),
					short_channel_id: next_hop_data.data.short_channel_id,
					incoming_shared_secret: shared_secret,
					amt_to_forward: next_hop_data.data.amt_to_forward,
					outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
				})
			};

		channel_state = Some(self.channel_state.lock().unwrap());
		if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
			if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
				let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
				let forwarding_id = match id_option {
					None => { // unknown_next_peer
						return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
					},
					Some(id) => id.clone(),
				};
				if let Some((err, code, chan_update)) = loop {
					let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();

					// Note that we could technically not return an error yet here and just hope
					// that the connection is reestablished or monitor updated by the time we get
					// around to doing the actual forward, but better to fail early if we can and
					// hopefully an attacker trying to path-trace payments cannot make this occur
					// on a small/per-node/per-channel scale.
					if !chan.is_live() { // channel_disabled
						break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
					}
					if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
						break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
					}
					let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
					if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
						break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
					}
					if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
						break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
					}
					let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
					// We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
					if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
						break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
					}
					if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
						break Some(("CLTV expiry is too far in the future", 21, None));
					}
					break None;
				}
				{
					let mut res = Vec::with_capacity(8 + 128);
					if code == 0x1000 | 11 || code == 0x1000 | 12 {
						res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
					}
					else if code == 0x1000 | 13 {
						res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
					}
					if let Some(chan_update) = chan_update {
						res.extend_from_slice(&chan_update.encode_with_len()[..]);
					}
					return_err!(err, code, &res[..]);
				}
			}
		}

		(pending_forward_info, channel_state.unwrap())
	}
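
	// (The failure codes used above follow BOLT #4's bit conventions: 0x8000 = BADONION,
	// 0x4000 = PERM, 0x2000 = NODE, 0x1000 = UPDATE, OR'd with the specific failure number.)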

	/// Only fails if the channel does not yet have an assigned short_id.
	/// May be called with channel_state already locked!
	fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
		let short_channel_id = match chan.get_short_channel_id() {
			None => return Err(HandleError{err: "Channel not yet established", action: None}),
			Some(id) => id,
		};

		let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];

		let unsigned = msgs::UnsignedChannelUpdate {
			chain_hash: self.genesis_hash,
			short_channel_id: short_channel_id,
			timestamp: chan.get_channel_update_count(),
			flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
			cltv_expiry_delta: CLTV_EXPIRY_DELTA,
			htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
			fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
			fee_proportional_millionths: chan.get_fee_proportional_millionths(),
			excess_data: Vec::new(),
		};

		let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
		let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);

		Ok(msgs::ChannelUpdate {
			signature: sig,
			contents: unsigned
		})
	}

	/// Sends a payment along a given route.
	///
	/// Value parameters are provided via the last hop in route, see documentation for RouteHop
	/// fields for more info.
	///
	/// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
	/// payment), we don't do anything to stop you! We always try to ensure that if the provided
	/// next hop knows the preimage to payment_hash they can claim an additional amount as
	/// specified in the last hop in the route! Thus, you should probably do your own
	/// payment_preimage tracking (which you should already be doing as they represent "proof of
	/// payment") and prevent double-sends yourself.
	///
	/// May generate a SendHTLCs message event on success, which should be relayed.
	///
	/// Raises APIError::RouteError when an invalid route or forward parameter
	/// (cltv_delta, fee, node public key) is specified.
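	///
	/// Minimal call sketch (the route comes from ln::router::Router::get_route and the
	/// payment_hash from the recipient; illustrative only):
	///
	/// ```ignore
	/// channel_manager.send_payment(route, payment_hash)?;
	/// // Then drive PeerManager::process_events to actually send the resulting messages.
	/// ```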
	pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
		if route.hops.len() < 1 || route.hops.len() > 20 {
			return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
		}
		let our_node_id = self.get_our_node_id();
		for (idx, hop) in route.hops.iter().enumerate() {
			if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
				return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
			}
		}

		let session_priv = SecretKey::from_slice(&self.secp_ctx, &{
			let mut session_key = [0; 32];
			rng::fill_bytes(&mut session_key);
			session_key
		}).expect("RNG is bad!");

		let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;

		let onion_keys = secp_call!(ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
				APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
		let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);

		let _ = self.total_consistency_lock.read().unwrap();
		let mut channel_state = self.channel_state.lock().unwrap();

		let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
			None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
			Some(id) => id.clone(),
		};

		let res = {
			let chan = channel_state.by_id.get_mut(&id).unwrap();
			if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
				return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
			}
			if chan.is_awaiting_monitor_update() {
				return Err(APIError::MonitorUpdateFailed);
			}
			if !chan.is_live() {
				return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
			}
			chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
				route: route.clone(),
				session_priv: session_priv.clone(),
				first_hop_htlc_msat: htlc_msat,
			}, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
		};
		match res {
			Some((update_add, commitment_signed, chan_monitor)) => {
				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
					self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst);
					return Err(APIError::MonitorUpdateFailed);
				}

				channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
					node_id: route.hops.first().unwrap().pubkey,
					updates: msgs::CommitmentUpdate {
						update_add_htlcs: vec![update_add],
						update_fulfill_htlcs: Vec::new(),
						update_fail_htlcs: Vec::new(),
						update_fail_malformed_htlcs: Vec::new(),
						update_fee: None,
						commitment_signed,
					},
				});
			},
			None => {},
		}

		Ok(())
	}

	/// Call this upon creation of a funding transaction for the given channel.
	///
	/// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
	/// or your counterparty can steal your funds!
	///
	/// Panics if a funding transaction has already been provided for this channel.
	///
	/// May panic if the funding_txo is duplicative with some other channel (note that this should
	/// be trivially prevented by using unique funding transaction keys per-channel).
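	///
	/// Sketch of the expected flow, typically from a FundingGenerationReady event handler
	/// (helper names are illustrative):
	///
	/// ```ignore
	/// // After building a funding transaction paying to the event's output script:
	/// let funding_txo = OutPoint::new(funding_tx.txid(), output_index);
	/// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_txo);
	/// ```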
	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
		let _ = self.total_consistency_lock.read().unwrap();

		let (chan, msg, chan_monitor) = {
			let mut channel_state = self.channel_state.lock().unwrap();
			match channel_state.by_id.remove(temporary_channel_id) {
				Some(mut chan) => {
					match chan.get_outbound_funding_created(funding_txo) {
						Ok(funding_msg) => {
							(chan, funding_msg.0, funding_msg.1)
						},
						Err(e) => {
							log_error!(self, "Got bad signatures: {}!", e.err);
							channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
								node_id: chan.get_their_node_id(),
								action: e.action,
							});
							return;
						},
					}
				},
				None => return
			}
		};
		// Because we have exclusive ownership of the channel here we can release the channel_state
		// lock before add_update_monitor
		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
			unimplemented!();
		}

		let mut channel_state = self.channel_state.lock().unwrap();
		channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
			node_id: chan.get_their_node_id(),
			msg: msg,
		});
		match channel_state.by_id.entry(chan.channel_id()) {
			hash_map::Entry::Occupied(_) => {
				panic!("Generated duplicate funding txid?");
			},
			hash_map::Entry::Vacant(e) => {
				e.insert(chan);
			}
		}
	}

	fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
		if !chan.should_announce() { return None }

		let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
			Ok(res) => res,
			Err(_) => return None, // Only in case of state precondition violations eg channel is closing
		};
		let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
		let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);

		Some(msgs::AnnouncementSignatures {
			channel_id: chan.channel_id(),
			short_channel_id: chan.get_short_channel_id().unwrap(),
			node_signature: our_node_sig,
			bitcoin_signature: our_bitcoin_sig,
		})
	}

	/// Processes HTLCs which are pending waiting on random forward delay.
	///
	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
	/// Will likely generate further events.
	pub fn process_pending_htlc_forwards(&self) {
		let _ = self.total_consistency_lock.read().unwrap();

		let mut new_events = Vec::new();
		let mut failed_forwards = Vec::new();
		{
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = channel_state_lock.borrow_parts();

			if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
				return;
			}

			for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
				if short_chan_id != 0 {
					let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
						Some(chan_id) => chan_id.clone(),
						None => {
							failed_forwards.reserve(pending_forwards.len());
							for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
									short_channel_id: prev_short_channel_id,
									htlc_id: prev_htlc_id,
									incoming_packet_shared_secret: forward_info.incoming_shared_secret,
								});
								failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
							}
							continue;
						}
					};
					let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();

					let mut add_htlc_msgs = Vec::new();
					for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
						let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
							short_channel_id: prev_short_channel_id,
							htlc_id: prev_htlc_id,
							incoming_packet_shared_secret: forward_info.incoming_shared_secret,
						});
						match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
							Err(_e) => {
								let chan_update = self.get_channel_update(forward_chan).unwrap();
								failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
								continue;
							},
							Ok(update_add) => {
								match update_add {
									Some(msg) => { add_htlc_msgs.push(msg); },
									None => {
										// Nothing to do here...we're waiting on a remote
										// revoke_and_ack before we can add any more HTLCs. The Channel
										// will automatically handle building the update_add_htlc and
										// commitment_signed messages when we can.
										// TODO: Do some kind of timer to set the channel as !is_live()
										// as we don't really want others relying on us relaying through
										// this channel currently :/.
									}
								}
							}
						}
					}

					if !add_htlc_msgs.is_empty() {
						let (commitment_msg, monitor) = match forward_chan.send_commitment() {
							Ok(res) => res,
							Err(e) => {
								if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action {
								} else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action {
								} else {
									panic!("Stated return value requirements in send_commitment() were not met");
								}
								//TODO: Handle...this is bad!
								continue;
							},
						};
						if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
							unimplemented!(); // but def dont push the event...
						}
						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
							node_id: forward_chan.get_their_node_id(),
							updates: msgs::CommitmentUpdate {
								update_add_htlcs: add_htlc_msgs,
								update_fulfill_htlcs: Vec::new(),
								update_fail_htlcs: Vec::new(),
								update_fail_malformed_htlcs: Vec::new(),
								update_fee: None,
								commitment_signed: commitment_msg,
							},
						});
					}
				} else {
					for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
						let prev_hop_data = HTLCPreviousHopData {
							short_channel_id: prev_short_channel_id,
							htlc_id: prev_htlc_id,
							incoming_packet_shared_secret: forward_info.incoming_shared_secret,
						};
						match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
							hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
							hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
						};
						new_events.push(events::Event::PaymentReceived {
							payment_hash: forward_info.payment_hash,
							amt: forward_info.amt_to_forward,
						});
					}
				}
			}
		}

		for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
			match update {
				None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
				Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
			};
		}

		if new_events.is_empty() { return }
		let mut events = self.pending_events.lock().unwrap();
		events.append(&mut new_events);
	}

	/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event.
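	///
	/// E.g. (sketch), after deciding not to accept a PaymentReceived payment:
	///
	/// ```ignore
	/// channel_manager.fail_htlc_backwards(&payment_hash, PaymentFailReason::PreimageUnknown);
	/// ```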
	pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool {
		let _ = self.total_consistency_lock.read().unwrap();

		let mut channel_state = Some(self.channel_state.lock().unwrap());
		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
		if let Some(mut sources) = removed_source {
			for htlc_with_hash in sources.drain(..) {
				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
				self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: if reason == PaymentFailReason::PreimageUnknown {0x4000 | 15} else {0x4000 | 16}, data: Vec::new() });
			}
			true
		} else { false }
	}

	/// Fails an HTLC backwards to the node that sent it to us.
	/// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
	/// There are several callsites that do stupid things like loop over a list of payment_hashes
	/// to fail and take the channel_state lock for each iteration (as we take ownership and may
	/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
	/// still-available channels.
	fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) {
		match source {
			HTLCSource::OutboundRoute { .. } => {
				mem::drop(channel_state_lock);
				if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
					let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
					if let Some(update) = channel_update {
						self.channel_state.lock().unwrap().pending_msg_events.push(
							events::MessageSendEvent::PaymentFailureNetworkUpdate {
								update,
							}
						);
					}
					self.pending_events.lock().unwrap().push(events::Event::PaymentFailed {
						payment_hash: payment_hash.clone(),
						rejected_by_dest: !payment_retryable,
					});
				} else {
					panic!("should have onion error packet here");
				}
			},
			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
				let err_packet = match onion_error {
					HTLCFailReason::Reason { failure_code, data } => {
						let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
						ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
					},
					HTLCFailReason::ErrorPacket { err } => {
						ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
					}
				};

				let channel_state = channel_state_lock.borrow_parts();

				let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
					Some(chan_id) => chan_id.clone(),
					None => return
				};

				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
				match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
					Ok(Some((msg, commitment_msg, chan_monitor))) => {
						if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
							unimplemented!();
						}
						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
							node_id: chan.get_their_node_id(),
							updates: msgs::CommitmentUpdate {
								update_add_htlcs: Vec::new(),
								update_fulfill_htlcs: Vec::new(),
								update_fail_htlcs: vec![msg],
								update_fail_malformed_htlcs: Vec::new(),
								update_fee: None,
								commitment_signed: commitment_msg,
							},
						});
					},
					Ok(None) => {},
					Err(_e) => {
						//TODO: Do something with e?
						return;
					},
				}
			},
		}
	}
1519 /// Provides a payment preimage in response to a PaymentReceived event, returning true and
1520 /// generating message events for the net layer to claim the payment, if possible. Thus, you
1521 /// should probably kick the net layer to go send messages if this returns true!
1523 /// May panic if called except in response to a PaymentReceived event.
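///
/// A minimal sketch (assuming `manager` is an Arc<ChannelManager>, `peer_manager` a
/// PeerManager, and `preimage` the 32-byte preimage whose SHA256 is the payment_hash
/// from a PaymentReceived event; names are illustrative):
///
/// ```ignore
/// if manager.claim_funds(preimage) {
///     // update_fulfill_htlc/commitment_signed messages were queued - push them out.
///     peer_manager.process_events();
/// }
/// ```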
1524 pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
1525 let mut sha = Sha256::new();
1526 sha.input(&payment_preimage);
1527 let mut payment_hash = [0; 32];
1528 sha.result(&mut payment_hash);
1530 let _ = self.total_consistency_lock.read().unwrap();
1532 let mut channel_state = Some(self.channel_state.lock().unwrap());
1533 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
1534 if let Some(mut sources) = removed_source {
1535 for htlc_with_hash in sources.drain(..) {
1536 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1537 self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
1542 fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: [u8; 32]) {
1544 HTLCSource::OutboundRoute { .. } => {
1545 mem::drop(channel_state_lock);
1546 let mut pending_events = self.pending_events.lock().unwrap();
1547 pending_events.push(events::Event::PaymentSent {
1551 HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
1552 //TODO: Delay the claimed_funds relaying just like we do outbound relay!
1553 let channel_state = channel_state_lock.borrow_parts();
1555 let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1556 Some(chan_id) => chan_id.clone(),
1558 // TODO: There is probably a channel manager somewhere that needs to
1559 // learn the preimage as the channel already hit the chain and that's
1565 let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1566 match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
1567 Ok((msgs, monitor_option)) => {
1568 if let Some(chan_monitor) = monitor_option {
1569 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1570 unimplemented!(); // but definitely don't push the event in that case...
1573 if let Some((msg, commitment_signed)) = msgs {
1574 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1575 node_id: chan.get_their_node_id(),
1576 updates: msgs::CommitmentUpdate {
1577 update_add_htlcs: Vec::new(),
1578 update_fulfill_htlcs: vec![msg],
1579 update_fail_htlcs: Vec::new(),
1580 update_fail_malformed_htlcs: Vec::new(),
1588 // TODO: There is probably a channel manager somewhere that needs to
1589 // learn the preimage as the channel may be about to hit the chain.
1590 //TODO: Do something with e?
1598 /// Gets the node_id held by this ChannelManager
1599 pub fn get_our_node_id(&self) -> PublicKey {
1600 PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
1603 /// Used to restore channels to normal operation after a
1604 /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update operation.
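///
/// A rough usage sketch (assuming a previous monitor update returned
/// ChannelMonitorUpdateErr::TemporaryFailure and the monitor has since been persisted):
///
/// ```ignore
/// manager.test_restore_channel_monitor(); // replays held messages, forwards and failures
/// ```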
1606 pub fn test_restore_channel_monitor(&self) {
1607 let mut close_results = Vec::new();
1608 let mut htlc_forwards = Vec::new();
1609 let mut htlc_failures = Vec::new();
1610 let _ = self.total_consistency_lock.read().unwrap();
1613 let mut channel_lock = self.channel_state.lock().unwrap();
1614 let channel_state = channel_lock.borrow_parts();
1615 let short_to_id = channel_state.short_to_id;
1616 let pending_msg_events = channel_state.pending_msg_events;
1617 channel_state.by_id.retain(|_, channel| {
1618 if channel.is_awaiting_monitor_update() {
1619 let chan_monitor = channel.channel_monitor();
1620 if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1622 ChannelMonitorUpdateErr::PermanentFailure => {
1623 if let Some(short_id) = channel.get_short_channel_id() {
1624 short_to_id.remove(&short_id);
1626 close_results.push(channel.force_shutdown());
1627 if let Ok(update) = self.get_channel_update(&channel) {
1628 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1634 ChannelMonitorUpdateErr::TemporaryFailure => true,
1637 let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored();
1638 if !pending_forwards.is_empty() {
1639 htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
1641 htlc_failures.append(&mut pending_failures);
1643 macro_rules! handle_cs { () => {
1644 if let Some(update) = commitment_update {
1645 pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1646 node_id: channel.get_their_node_id(),
1651 macro_rules! handle_raa { () => {
1652 if let Some(revoke_and_ack) = raa {
1653 pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
1654 node_id: channel.get_their_node_id(),
1655 msg: revoke_and_ack,
1660 RAACommitmentOrder::CommitmentFirst => {
1664 RAACommitmentOrder::RevokeAndACKFirst => {
1675 for failure in htlc_failures.drain(..) {
1676 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
1678 self.forward_htlcs(&mut htlc_forwards[..]);
1680 for res in close_results.drain(..) {
1681 self.finish_force_close_channel(res);
1685 fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
1686 if msg.chain_hash != self.genesis_hash {
1687 return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
1690 let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration)
1691 .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
1692 let mut channel_state_lock = self.channel_state.lock().unwrap();
1693 let channel_state = channel_state_lock.borrow_parts();
1694 match channel_state.by_id.entry(channel.channel_id()) {
1695 hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
1696 hash_map::Entry::Vacant(entry) => {
1697 channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
1698 node_id: their_node_id.clone(),
1699 msg: channel.get_accept_channel(),
1701 entry.insert(channel);
1707 fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
1708 let (value, output_script, user_id) = {
1709 let mut channel_state = self.channel_state.lock().unwrap();
1710 match channel_state.by_id.get_mut(&msg.temporary_channel_id) {
1712 if chan.get_their_node_id() != *their_node_id {
1713 //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
1714 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1716 chan.accept_channel(&msg, &self.default_configuration)
1717 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
1718 (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
1720 //TODO: same as above
1721 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1724 let mut pending_events = self.pending_events.lock().unwrap();
1725 pending_events.push(events::Event::FundingGenerationReady {
1726 temporary_channel_id: msg.temporary_channel_id,
1727 channel_value_satoshis: value,
1728 output_script: output_script,
1729 user_channel_id: user_id,
1734 fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
1735 let (chan, funding_msg, monitor_update) = {
1736 let mut channel_state = self.channel_state.lock().unwrap();
1737 match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
1738 hash_map::Entry::Occupied(mut chan) => {
1739 if chan.get().get_their_node_id() != *their_node_id {
1740 //TODO: here and below MsgHandleErrInternal, #153 case
1741 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1743 match chan.get_mut().funding_created(msg) {
1744 Ok((funding_msg, monitor_update)) => {
1745 (chan.remove(), funding_msg, monitor_update)
1748 return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1752 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1755 // Because we have exclusive ownership of the channel here we can release the channel_state
1756 // lock before add_update_monitor
1757 if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
1760 let mut channel_state_lock = self.channel_state.lock().unwrap();
1761 let channel_state = channel_state_lock.borrow_parts();
1762 match channel_state.by_id.entry(funding_msg.channel_id) {
1763 hash_map::Entry::Occupied(_) => {
1764 return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
1766 hash_map::Entry::Vacant(e) => {
1767 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
1768 node_id: their_node_id.clone(),
1777 fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
1778 let (funding_txo, user_id) = {
1779 let mut channel_state = self.channel_state.lock().unwrap();
1780 match channel_state.by_id.get_mut(&msg.channel_id) {
1782 if chan.get_their_node_id() != *their_node_id {
1783 //TODO: here and below MsgHandleErrInternal, #153 case
1784 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1786 let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1787 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1790 (chan.get_funding_txo().unwrap(), chan.get_user_id())
1792 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1795 let mut pending_events = self.pending_events.lock().unwrap();
1796 pending_events.push(events::Event::FundingBroadcastSafe {
1797 funding_txo: funding_txo,
1798 user_channel_id: user_id,
1803 fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
1804 let mut channel_state_lock = self.channel_state.lock().unwrap();
1805 let channel_state = channel_state_lock.borrow_parts();
1806 match channel_state.by_id.get_mut(&msg.channel_id) {
1808 if chan.get_their_node_id() != *their_node_id {
1809 //TODO: here and below MsgHandleErrInternal, #153 case
1810 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1812 chan.funding_locked(&msg)
1813 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1814 if let Some(announcement_sigs) = self.get_announcement_sigs(chan) {
1815 channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
1816 node_id: their_node_id.clone(),
1817 msg: announcement_sigs,
1822 None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1826 fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
1827 let (mut dropped_htlcs, chan_option) = {
1828 let mut channel_state_lock = self.channel_state.lock().unwrap();
1829 let channel_state = channel_state_lock.borrow_parts();
1831 match channel_state.by_id.entry(msg.channel_id.clone()) {
1832 hash_map::Entry::Occupied(mut chan_entry) => {
1833 if chan_entry.get().get_their_node_id() != *their_node_id {
1834 //TODO: here and below MsgHandleErrInternal, #153 case
1835 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1837 let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1838 if let Some(msg) = shutdown {
1839 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
1840 node_id: their_node_id.clone(),
1844 if let Some(msg) = closing_signed {
1845 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
1846 node_id: their_node_id.clone(),
1850 if chan_entry.get().is_shutdown() {
1851 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1852 channel_state.short_to_id.remove(&short_id);
1854 (dropped_htlcs, Some(chan_entry.remove_entry().1))
1855 } else { (dropped_htlcs, None) }
1857 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1860 for htlc_source in dropped_htlcs.drain(..) {
1861 // unknown_next_peer - the channel is closing, so the next peer is no longer reachable
1862 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
1864 if let Some(chan) = chan_option {
1865 if let Ok(update) = self.get_channel_update(&chan) {
1866 let mut channel_state = self.channel_state.lock().unwrap();
1867 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1875 fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
1876 let (tx, chan_option) = {
1877 let mut channel_state_lock = self.channel_state.lock().unwrap();
1878 let channel_state = channel_state_lock.borrow_parts();
1879 match channel_state.by_id.entry(msg.channel_id.clone()) {
1880 hash_map::Entry::Occupied(mut chan_entry) => {
1881 if chan_entry.get().get_their_node_id() != *their_node_id {
1882 //TODO: here and below MsgHandleErrInternal, #153 case
1883 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1885 let (closing_signed, tx) = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1886 if let Some(msg) = closing_signed {
1887 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
1888 node_id: their_node_id.clone(),
1893 // We're done with this channel, we've got a signed closing transaction and
1894 // will send the closing_signed back to the remote peer upon return. This
1895 // also implies there are no pending HTLCs left on the channel, so we can
1896 // fully delete it from tracking (the channel monitor is still around to
1897 // watch for old state broadcasts)!
1898 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1899 channel_state.short_to_id.remove(&short_id);
1901 (tx, Some(chan_entry.remove_entry().1))
1902 } else { (tx, None) }
1904 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1907 if let Some(broadcast_tx) = tx {
1908 self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
1910 if let Some(chan) = chan_option {
1911 if let Ok(update) = self.get_channel_update(&chan) {
1912 let mut channel_state = self.channel_state.lock().unwrap();
1913 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1921 fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
1922 //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
1923 //determine the state of the payment based on our response/if we forward anything/the time
1924 //we take to respond. We should take care to avoid allowing such an attack.
1926 //TODO: There exists a further attack where a node may garble the onion data, forward it to
1927 //us repeatedly garbled in different ways, and compare our error messages, which are
1928 //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
1929 //but we should prevent it anyway.
1931 let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
1932 let channel_state = channel_state_lock.borrow_parts();
1934 match channel_state.by_id.get_mut(&msg.channel_id) {
1936 if chan.get_their_node_id() != *their_node_id {
1937 //TODO: here MsgHandleErrInternal, #153 case
1938 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1940 if !chan.is_usable() {
1941 // If the update_add is completely bogus, the call will Err and we will close,
1942 // but if we've sent a shutdown and they haven't acknowledged it yet, we just
1943 // want to reject the new HTLC and fail it backwards instead of forwarding.
1944 if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
1945 let chan_update = self.get_channel_update(chan);
1946 pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
1947 channel_id: msg.channel_id,
1948 htlc_id: msg.htlc_id,
1949 reason: if let Ok(update) = chan_update {
1950 ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &update.encode_with_len()[..])
1952 // This can only happen if the channel isn't in the fully-funded
1953 // state yet, implying our counterparty is trying to route payments
1954 // over the channel back to themselves (because no one else should
1955 // know the short_id is a lightning channel yet). We should have no
1956 // problem just calling this unknown_next_peer
1957 ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[])
1962 chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1964 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1968 fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
1969 let mut channel_state = self.channel_state.lock().unwrap();
1970 let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
1972 if chan.get_their_node_id() != *their_node_id {
1973 //TODO: here and below MsgHandleErrInternal, #153 case
1974 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1976 chan.update_fulfill_htlc(&msg)
1977 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone()
1979 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1981 self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone());
1985 // Process failure we got back from upstream on a payment we sent. Returns update and a boolean
1986 // indicating that the payment itself failed
1987 fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool) {
1988 if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
1989 macro_rules! onion_failure_log {
1990 ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => {
1991 log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value);
1993 ( $error_code_textual: expr, $error_code: expr ) => {
1994 log_trace!(self, "{}({})", $error_code_textual, $error_code);
1998 const BADONION: u16 = 0x8000;
1999 const PERM: u16 = 0x4000;
2000 const UPDATE: u16 = 0x1000;
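// For intuition, a BOLT 4 failure code is one or more of the flag bits above OR'd with a
// small ordinal (values taken from the spec, shown here only as worked examples):
//   UPDATE | 7          == 0x1007  temporary_channel_failure (carries a channel_update)
//   PERM | 15           == 0x400f  unknown_payment_hash (only from the final node)
//   BADONION | PERM | 4 == 0xc004  invalid_onion_version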
2003 let mut htlc_msat = *first_hop_htlc_msat;
2005 // Handle packed channel/node updates for passing back to the route handler
2006 Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
2007 if res.is_some() { return; }
2009 let incoming_htlc_msat = htlc_msat;
2010 let amt_to_forward = htlc_msat - route_hop.fee_msat;
2011 htlc_msat = amt_to_forward;
2013 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret[..]);
2015 let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
2016 decryption_tmp.resize(packet_decrypted.len(), 0);
2017 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
2018 chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
2019 packet_decrypted = decryption_tmp;
2021 let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
2023 if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
2024 let um = ChannelManager::gen_um_from_shared_secret(&shared_secret[..]);
2025 let mut hmac = Hmac::new(Sha256::new(), &um);
2026 hmac.input(&err_packet.encode()[32..]);
2027 let mut calc_tag = [0u8; 32];
2028 hmac.raw_result(&mut calc_tag);
2030 if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
2031 if err_packet.failuremsg.len() < 2 {
2032 // Packet too short to contain a failure code, but it passed the HMAC
2033 // check, so it definitely came from the peer in question
2034 res = Some((None, !is_from_final_node));
2036 let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]);
2038 match error_code & 0xff {
2040 // either from an intermediate or final node
2041 // invalid_realm(PERM|1),
2042 // temporary_node_failure(NODE|2)
2043 // permanent_node_failure(PERM|NODE|2)
2044 // required_node_feature_missing(PERM|NODE|3)
2045 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2046 node_id: route_hop.pubkey,
2047 is_permanent: error_code & PERM == PERM,
2048 }), !(error_code & PERM == PERM && is_from_final_node)));
2049 // A node returning invalid_realm is removed from the network_map even
2050 // though the NODE flag is not set (TODO: or should we remove the channel only?).
2051 // The payment may be retried when the removed node was not the final node.
2057 if is_from_final_node {
2058 let payment_retryable = match error_code {
2059 c if c == PERM|15 => false, // unknown_payment_hash
2060 c if c == PERM|16 => false, // incorrect_payment_amount
2061 17 => true, // final_expiry_too_soon
2062 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry
2063 let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
2066 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount
2067 let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
2071 // The final node has sent us either an invalid code, an error_code that
2072 // MUST only be sent by a processing (non-final) node, or a failuremsg whose
2073 // format does not conform to the spec.
2074 // Remove it from the network map and don't retry the payment.
2075 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2076 node_id: route_hop.pubkey,
2082 res = Some((None, payment_retryable));
2086 // From here, error_code can only have come from an intermediate node
2088 _c if error_code & PERM == PERM => {
2089 res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
2090 short_channel_id: route_hop.short_channel_id,
2094 _c if error_code & UPDATE == UPDATE => {
2095 let offset = match error_code {
2096 c if c == UPDATE|7 => 0, // temporary_channel_failure
2097 c if c == UPDATE|11 => 8, // amount_below_minimum
2098 c if c == UPDATE|12 => 8, // fee_insufficient
2099 c if c == UPDATE|13 => 4, // incorrect_cltv_expiry
2100 c if c == UPDATE|14 => 0, // expiry_too_soon
2101 c if c == UPDATE|20 => 2, // channel_disabled
2103 // node sending unknown code
2104 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2105 node_id: route_hop.pubkey,
2112 if err_packet.failuremsg.len() >= offset + 2 {
2113 let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize;
2114 if err_packet.failuremsg.len() >= offset + 4 + update_len {
2115 if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) {
2116 // if channel_update should NOT have caused the failure:
2117 // MAY treat the channel_update as invalid.
2118 let is_chan_update_invalid = match error_code {
2119 c if c == UPDATE|7 => { // temporary_channel_failure
2122 c if c == UPDATE|11 => { // amount_below_minimum
2123 let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
2124 onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat);
2125 incoming_htlc_msat > chan_update.contents.htlc_minimum_msat
2127 c if c == UPDATE|12 => { // fee_insufficient
2128 let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
2129 let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
2130 onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat);
2131 new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap()
2133 c if c == UPDATE|13 => { // incorrect_cltv_expiry
2134 let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
2135 onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry);
2136 route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta
2138 c if c == UPDATE|20 => { // channel_disabled
2139 let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]);
2140 onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags);
2141 chan_update.contents.flags & 0x01 == 0x01
2143 c if c == UPDATE|21 => true, // expiry_too_far
2144 _ => { unreachable!(); },
2147 let msg = if is_chan_update_invalid { None } else {
2148 Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
2152 res = Some((msg, true));
2158 _c if error_code & BADONION == BADONION => {
2161 14 => { // expiry_too_soon
2162 res = Some((None, true));
2166 // node sending unknown code
2167 res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
2168 node_id: route_hop.pubkey,
2177 }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
2178 res.unwrap_or((None, true))
2179 } else { (None, true) }
2182 fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
2183 let mut channel_state = self.channel_state.lock().unwrap();
2184 match channel_state.by_id.get_mut(&msg.channel_id) {
2186 if chan.get_their_node_id() != *their_node_id {
2187 //TODO: here and below MsgHandleErrInternal, #153 case
2188 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2190 chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
2191 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
2193 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2198 fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
2199 let mut channel_state = self.channel_state.lock().unwrap();
2200 match channel_state.by_id.get_mut(&msg.channel_id) {
2202 if chan.get_their_node_id() != *their_node_id {
2203 //TODO: here and below MsgHandleErrInternal, #153 case
2204 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2206 if (msg.failure_code & 0x8000) == 0 {
2207 return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id));
2209 chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
2210 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2213 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2217 fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
2218 let mut channel_state_lock = self.channel_state.lock().unwrap();
2219 let channel_state = channel_state_lock.borrow_parts();
2220 match channel_state.by_id.get_mut(&msg.channel_id) {
2222 if chan.get_their_node_id() != *their_node_id {
2223 //TODO: here and below MsgHandleErrInternal, #153 case
2224 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2226 let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
2227 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2230 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
2231 node_id: their_node_id.clone(),
2232 msg: revoke_and_ack,
2234 if let Some(msg) = commitment_signed {
2235 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2236 node_id: their_node_id.clone(),
2237 updates: msgs::CommitmentUpdate {
2238 update_add_htlcs: Vec::new(),
2239 update_fulfill_htlcs: Vec::new(),
2240 update_fail_htlcs: Vec::new(),
2241 update_fail_malformed_htlcs: Vec::new(),
2243 commitment_signed: msg,
2247 if let Some(msg) = closing_signed {
2248 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
2249 node_id: their_node_id.clone(),
2255 None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2260 fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
2261 for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
2262 let mut forward_event = None;
2263 if !pending_forwards.is_empty() {
2264 let mut channel_state = self.channel_state.lock().unwrap();
2265 if channel_state.forward_htlcs.is_empty() {
2266 forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
2267 channel_state.next_forward = forward_event.unwrap();
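// (The 1x-5x randomization of MIN_HTLC_RELAY_HOLDING_CELL_MILLIS batches forwards
// together, making timing-based correlation of relayed HTLCs somewhat harder.)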
2269 for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
2270 match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
2271 hash_map::Entry::Occupied(mut entry) => {
2272 entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });
2274 hash_map::Entry::Vacant(entry) => {
2275 entry.insert(vec![HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }]);
2280 match forward_event {
2282 let mut pending_events = self.pending_events.lock().unwrap();
2283 pending_events.push(events::Event::PendingHTLCsForwardable {
2284 time_forwardable: time
2292 fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
2293 let (pending_forwards, mut pending_failures, short_channel_id) = {
2294 let mut channel_state_lock = self.channel_state.lock().unwrap();
2295 let channel_state = channel_state_lock.borrow_parts();
2296 match channel_state.by_id.get_mut(&msg.channel_id) {
2298 if chan.get_their_node_id() != *their_node_id {
2299 //TODO: here and below MsgHandleErrInternal, #153 case
2300 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2302 let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
2303 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2306 if let Some(updates) = commitment_update {
2307 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2308 node_id: their_node_id.clone(),
2312 if let Some(msg) = closing_signed {
2313 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
2314 node_id: their_node_id.clone(),
2318 (pending_forwards, pending_failures, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
2320 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2323 for failure in pending_failures.drain(..) {
2324 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
2326 self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
2331 fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
2332 let mut channel_state = self.channel_state.lock().unwrap();
2333 match channel_state.by_id.get_mut(&msg.channel_id) {
2335 if chan.get_their_node_id() != *their_node_id {
2336 //TODO: here and below MsgHandleErrInternal, #153 case
2337 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2339 chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
2341 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2345 fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
2346 let mut channel_state_lock = self.channel_state.lock().unwrap();
2347 let channel_state = channel_state_lock.borrow_parts();
2349 match channel_state.by_id.get_mut(&msg.channel_id) {
2351 if chan.get_their_node_id() != *their_node_id {
2352 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2354 if !chan.is_usable() {
2355 return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
2358 let our_node_id = self.get_our_node_id();
2359 let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
2360 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2362 let were_node_one = announcement.node_id_1 == our_node_id;
2363 let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
2364 let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id);
2365 secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action);
2366 secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action);
2368 let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
2370 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
2371 msg: msgs::ChannelAnnouncement {
2372 node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
2373 node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
2374 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
2375 bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
2376 contents: announcement,
2378 update_msg: self.get_channel_update(chan).unwrap(), // can only fail if we're not in a ready state
2381 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2386 fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
2387 let mut channel_state_lock = self.channel_state.lock().unwrap();
2388 let channel_state = channel_state_lock.borrow_parts();
2390 match channel_state.by_id.get_mut(&msg.channel_id) {
2392 if chan.get_their_node_id() != *their_node_id {
2393 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
2395 let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg)
2396 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
2397 if let Some(monitor) = channel_monitor {
2398 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
2402 if let Some(msg) = funding_locked {
2403 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
2404 node_id: their_node_id.clone(),
2408 macro_rules! send_raa { () => {
2409 if let Some(msg) = revoke_and_ack {
2410 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
2411 node_id: their_node_id.clone(),
2416 macro_rules! send_cu { () => {
2417 if let Some(updates) = commitment_update {
2418 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2419 node_id: their_node_id.clone(),
2425 RAACommitmentOrder::RevokeAndACKFirst => {
2429 RAACommitmentOrder::CommitmentFirst => {
2434 if let Some(msg) = shutdown {
2435 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
2436 node_id: their_node_id.clone(),
2442 None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
2446 /// Begins the update_fee process. Allowed only on an outbound channel.
2447 /// If successful, will generate an UpdateHTLCs event, so you should probably poll
2448 /// PeerManager::process_events afterwards.
2449 /// Note: This API is likely to change!
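///
/// A hedged sketch (assuming `manager` is an Arc<ChannelManager> and `channel_id` names an
/// outbound, fully-established channel; the feerate value is illustrative):
///
/// ```ignore
/// match manager.update_fee(channel_id, 1000) {
///     Ok(()) => peer_manager.process_events(), // an UpdateHTLCs event was queued
///     Err(e) => { /* e.g. APIError::ChannelUnavailable if the peer is disconnected */ },
/// }
/// ```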
2451 pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
2452 let _ = self.total_consistency_lock.read().unwrap();
2453 let mut channel_state_lock = self.channel_state.lock().unwrap();
2454 let channel_state = channel_state_lock.borrow_parts();
2456 match channel_state.by_id.get_mut(&channel_id) {
2457 None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
2459 if !chan.is_outbound() {
2460 return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
2462 if chan.is_awaiting_monitor_update() {
2463 return Err(APIError::MonitorUpdateFailed);
2465 if !chan.is_live() {
2466 return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
2468 if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? {
2469 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2472 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2473 node_id: chan.get_their_node_id(),
2474 updates: msgs::CommitmentUpdate {
2475 update_add_htlcs: Vec::new(),
2476 update_fulfill_htlcs: Vec::new(),
2477 update_fail_htlcs: Vec::new(),
2478 update_fail_malformed_htlcs: Vec::new(),
2479 update_fee: Some(update_fee),
2490 impl events::MessageSendEventsProvider for ChannelManager {
2491 fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
2492 let mut ret = Vec::new();
2493 let mut channel_state = self.channel_state.lock().unwrap();
2494 mem::swap(&mut ret, &mut channel_state.pending_msg_events);
2499 impl events::EventsProvider for ChannelManager {
2500 fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
2501 let mut ret = Vec::new();
2502 let mut pending_events = self.pending_events.lock().unwrap();
2503 mem::swap(&mut ret, &mut *pending_events);
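// A typical consumer loop over these providers (sketch; `manager` assumed to be an
// Arc<ChannelManager> driven by the user's event loop):
//
//     for event in manager.get_and_clear_pending_events() {
//         match event {
//             events::Event::PaymentReceived { payment_hash, amt } => { /* claim or fail */ },
//             _ => { /* handle FundingGenerationReady, PaymentSent, PaymentFailed, ... */ },
//         }
//     }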
2508 impl ChainListener for ChannelManager {
2509 fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
2510 let _ = self.total_consistency_lock.read().unwrap();
2511 let mut failed_channels = Vec::new();
2513 let mut channel_lock = self.channel_state.lock().unwrap();
2514 let channel_state = channel_lock.borrow_parts();
2515 let short_to_id = channel_state.short_to_id;
2516 let pending_msg_events = channel_state.pending_msg_events;
2517 channel_state.by_id.retain(|_, channel| {
2518 let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
2519 if let Ok(Some(funding_locked)) = chan_res {
2520 pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
2521 node_id: channel.get_their_node_id(),
2522 msg: funding_locked,
2524 if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
2525 pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
2526 node_id: channel.get_their_node_id(),
2527 msg: announcement_sigs,
2530 short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
2531 } else if let Err(e) = chan_res {
2532 pending_msg_events.push(events::MessageSendEvent::HandleError {
2533 node_id: channel.get_their_node_id(),
2536 if channel.is_shutdown() {
2540 if let Some(funding_txo) = channel.get_funding_txo() {
2541 for tx in txn_matched {
2542 for inp in tx.input.iter() {
2543 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
2544 if let Some(short_id) = channel.get_short_channel_id() {
2545 short_to_id.remove(&short_id);
2547 // It looks like our counterparty went on-chain. We go ahead and
2548 // broadcast our latest local state as well here, just in case it's
2549 // some kind of SPV attack, though we expect these to be dropped.
2550 failed_channels.push(channel.force_shutdown());
2551 if let Ok(update) = self.get_channel_update(&channel) {
2552 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2561 if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
2562 if let Some(short_id) = channel.get_short_channel_id() {
2563 short_to_id.remove(&short_id);
2565 failed_channels.push(channel.force_shutdown());
2566 // If would_broadcast_at_height() is true, the channel_monitor will broadcast
2567 // the latest local tx for us, so we should skip that here (it doesn't really
2568 // hurt anything, but does make tests a bit simpler).
2569 failed_channels.last_mut().unwrap().0 = Vec::new();
2570 if let Ok(update) = self.get_channel_update(&channel) {
2571 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2580 for failure in failed_channels.drain(..) {
2581 self.finish_force_close_channel(failure);
2583 self.latest_block_height.store(height as usize, Ordering::Release);
2584 *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
2587 /// We force-close the channel without letting our counterparty participate in the shutdown
2588 fn block_disconnected(&self, header: &BlockHeader) {
2589 let _ = self.total_consistency_lock.read().unwrap();
2590 let mut failed_channels = Vec::new();
2592 let mut channel_lock = self.channel_state.lock().unwrap();
2593 let channel_state = channel_lock.borrow_parts();
2594 let short_to_id = channel_state.short_to_id;
2595 let pending_msg_events = channel_state.pending_msg_events;
2596 channel_state.by_id.retain(|_, v| {
2597 if v.block_disconnected(header) {
2598 if let Some(short_id) = v.get_short_channel_id() {
2599 short_to_id.remove(&short_id);
2601 failed_channels.push(v.force_shutdown());
2602 if let Ok(update) = self.get_channel_update(&v) {
2603 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2613 for failure in failed_channels.drain(..) {
2614 self.finish_force_close_channel(failure);
2616 self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
2617 *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
2621 macro_rules! handle_error {
2622 ($self: ident, $internal: expr, $their_node_id: expr) => {
2625 Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
2626 if needs_channel_force_close {
2628 &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
2629 if msg.channel_id == [0; 32] {
2630 $self.peer_disconnected(&$their_node_id, true);
2632 $self.force_close_channel(&msg.channel_id);
2635 &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
2636 &Some(msgs::ErrorAction::IgnoreError) => {},
2637 &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
2638 if msg.channel_id == [0; 32] {
2639 $self.peer_disconnected(&$their_node_id, true);
2641 $self.force_close_channel(&msg.channel_id);
2653 impl ChannelMessageHandler for ChannelManager {
2654 //TODO: Handle errors and close channel (or so)
2655 fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
2656 let _ = self.total_consistency_lock.read().unwrap();
2657 handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
2660 fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
2661 let _ = self.total_consistency_lock.read().unwrap();
2662 handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
2665 fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
2666 let _ = self.total_consistency_lock.read().unwrap();
2667 handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
2670 fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
2671 let _ = self.total_consistency_lock.read().unwrap();
2672 handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
2675 fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
2676 let _ = self.total_consistency_lock.read().unwrap();
2677 handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
2680 fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
2681 let _ = self.total_consistency_lock.read().unwrap();
2682 handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
2685 fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
2686 let _ = self.total_consistency_lock.read().unwrap();
2687 handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
2690 fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
2691 let _ = self.total_consistency_lock.read().unwrap();
2692 handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
2695 fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
2696 let _ = self.total_consistency_lock.read().unwrap();
2697 handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
2700 fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
2701 let _ = self.total_consistency_lock.read().unwrap();
2702 handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
2705 fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
2706 let _ = self.total_consistency_lock.read().unwrap();
2707 handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
2710 fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
2711 let _ = self.total_consistency_lock.read().unwrap();
2712 handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
2715 fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
2716 let _ = self.total_consistency_lock.read().unwrap();
2717 handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
2720 fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
2721 let _ = self.total_consistency_lock.read().unwrap();
2722 handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
2725 fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
2726 let _ = self.total_consistency_lock.read().unwrap();
2727 handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
2730 fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
2731 let _ = self.total_consistency_lock.read().unwrap();
2732 handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
2735 fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
2736 let _ = self.total_consistency_lock.read().unwrap();
2737 let mut failed_channels = Vec::new();
2738 let mut failed_payments = Vec::new();
2740 let mut channel_state_lock = self.channel_state.lock().unwrap();
2741 let channel_state = channel_state_lock.borrow_parts();
2742 let short_to_id = channel_state.short_to_id;
2743 let pending_msg_events = channel_state.pending_msg_events;
2744 if no_connection_possible {
2745 log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id));
2746 channel_state.by_id.retain(|_, chan| {
2747 if chan.get_their_node_id() == *their_node_id {
2748 if let Some(short_id) = chan.get_short_channel_id() {
2749 short_to_id.remove(&short_id);
2751 failed_channels.push(chan.force_shutdown());
2752 if let Ok(update) = self.get_channel_update(&chan) {
2753 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2763 log_debug!(self, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id));
2764 channel_state.by_id.retain(|_, chan| {
2765 if chan.get_their_node_id() == *their_node_id {
2766 //TODO: mark channel disabled (and maybe announce such after a timeout).
2767 let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused();
2768 if !failed_adds.is_empty() {
2769 let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
2770 failed_payments.push((chan_update, failed_adds));
2772 if chan.is_shutdown() {
2773 if let Some(short_id) = chan.get_short_channel_id() {
2774 short_to_id.remove(&short_id);
2783 for failure in failed_channels.drain(..) {
2784 self.finish_force_close_channel(failure);
2786 for (chan_update, mut htlc_sources) in failed_payments {
2787 for (htlc_source, payment_hash) in htlc_sources.drain(..) {
2788 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
2793 fn peer_connected(&self, their_node_id: &PublicKey) {
2794 log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
2796 let _ = self.total_consistency_lock.read().unwrap();
2797 let mut channel_state_lock = self.channel_state.lock().unwrap();
2798 let channel_state = channel_state_lock.borrow_parts();
2799 let pending_msg_events = channel_state.pending_msg_events;
2800 channel_state.by_id.retain(|_, chan| {
2801 if chan.get_their_node_id() == *their_node_id {
2802 if !chan.have_received_message() {
2803 // If we created this (outbound) channel while we were disconnected from the
2804 // peer we probably failed to send the open_channel message, which is now
2805 // lost. We can't have had anything pending related to this channel, so we just
2809 pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
2810 node_id: chan.get_their_node_id(),
2811 msg: chan.get_channel_reestablish(),
2817 //TODO: Also re-broadcast announcement_signatures
2820 fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
2821 let _ = self.total_consistency_lock.read().unwrap();
2823 if msg.channel_id == [0; 32] {
2824 for chan in self.list_channels() {
2825 if chan.remote_network_id == *their_node_id {
2826 self.force_close_channel(&chan.channel_id);
2830 self.force_close_channel(&msg.channel_id);
2835 const SERIALIZATION_VERSION: u8 = 1;
2836 const MIN_SERIALIZATION_VERSION: u8 = 1;
2838 impl Writeable for PendingForwardHTLCInfo {
2839 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2840 if let &Some(ref onion) = &self.onion_packet {
2842 onion.write(writer)?;
2846 self.incoming_shared_secret.write(writer)?;
2847 self.payment_hash.write(writer)?;
2848 self.short_channel_id.write(writer)?;
2849 self.amt_to_forward.write(writer)?;
2850 self.outgoing_cltv_value.write(writer)?;
2855 impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
2856 fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
2857 let onion_packet = match <u8 as Readable<R>>::read(reader)? {
2859 1 => Some(msgs::OnionPacket::read(reader)?),
2860 _ => return Err(DecodeError::InvalidValue),
2862 Ok(PendingForwardHTLCInfo {
2864 incoming_shared_secret: Readable::read(reader)?,
2865 payment_hash: Readable::read(reader)?,
2866 short_channel_id: Readable::read(reader)?,
2867 amt_to_forward: Readable::read(reader)?,
2868 outgoing_cltv_value: Readable::read(reader)?,
2873 impl Writeable for HTLCFailureMsg {
2874 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2876 &HTLCFailureMsg::Relay(ref fail_msg) => {
2878 fail_msg.write(writer)?;
2880 &HTLCFailureMsg::Malformed(ref fail_msg) => {
2882 fail_msg.write(writer)?;
2889 impl<R: ::std::io::Read> Readable<R> for HTLCFailureMsg {
2890 fn read(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
2891 match <u8 as Readable<R>>::read(reader)? {
2892 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
2893 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
2894 _ => Err(DecodeError::InvalidValue),
2899 impl Writeable for PendingHTLCStatus {
2900 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2902 &PendingHTLCStatus::Forward(ref forward_info) => {
2904 forward_info.write(writer)?;
2906 &PendingHTLCStatus::Fail(ref fail_msg) => {
2908 fail_msg.write(writer)?;
2915 impl<R: ::std::io::Read> Readable<R> for PendingHTLCStatus {
2916 fn read(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
2917 match <u8 as Readable<R>>::read(reader)? {
2918 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
2919 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
2920 _ => Err(DecodeError::InvalidValue),
2925 impl_writeable!(HTLCPreviousHopData, 0, {
2928 incoming_packet_shared_secret
2931 impl Writeable for HTLCSource {
2932 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2934 &HTLCSource::PreviousHopData(ref hop_data) => {
2936 hop_data.write(writer)?;
2938 &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
2940 route.write(writer)?;
2941 session_priv.write(writer)?;
2942 first_hop_htlc_msat.write(writer)?;
2949 impl<R: ::std::io::Read> Readable<R> for HTLCSource {
2950 fn read(reader: &mut R) -> Result<HTLCSource, DecodeError> {
2951 match <u8 as Readable<R>>::read(reader)? {
2952 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
2953 1 => Ok(HTLCSource::OutboundRoute {
2954 route: Readable::read(reader)?,
2955 session_priv: Readable::read(reader)?,
2956 first_hop_htlc_msat: Readable::read(reader)?,
2958 _ => Err(DecodeError::InvalidValue),
2963 impl Writeable for HTLCFailReason {
2964 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
2966 &HTLCFailReason::ErrorPacket { ref err } => {
2970 &HTLCFailReason::Reason { ref failure_code, ref data } => {
2972 failure_code.write(writer)?;
2973 data.write(writer)?;
2980 impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
2981 fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
2982 match <u8 as Readable<R>>::read(reader)? {
2983 0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? }),
2984 1 => Ok(HTLCFailReason::Reason {
2985 failure_code: Readable::read(reader)?,
2986 data: Readable::read(reader)?,
2988 _ => Err(DecodeError::InvalidValue),
2993 impl_writeable!(HTLCForwardInfo, 0, {
2994 prev_short_channel_id,
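// The ChannelManager serialization format written below is, at a high level:
//   [SERIALIZATION_VERSION: u8][MIN_SERIALIZATION_VERSION: u8]
//   [genesis_hash][latest_block_height: u32][last_block_hash]
//   [funded-channel count: u64][each Channel with funding initiated]
//   [forward_htlcs count: u64][short_channel_id, pending-forward list]...
//   [claimable_htlcs count: u64][payment_hash, previous-hop list]...
// Channels which have not yet initiated funding are intentionally skipped.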
2999 impl Writeable for ChannelManager {
3000 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
3001 let _ = self.total_consistency_lock.write().unwrap();
3003 writer.write_all(&[SERIALIZATION_VERSION; 1])?;
3004 writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
3006 self.genesis_hash.write(writer)?;
3007 (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
3008 self.last_block_hash.lock().unwrap().write(writer)?;
3010 let channel_state = self.channel_state.lock().unwrap();
3011 let mut unfunded_channels = 0;
3012 for (_, channel) in channel_state.by_id.iter() {
3013 if !channel.is_funding_initiated() {
3014 unfunded_channels += 1;
3017 ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
3018 for (_, channel) in channel_state.by_id.iter() {
3019 if channel.is_funding_initiated() {
3020 channel.write(writer)?;
3024 (channel_state.forward_htlcs.len() as u64).write(writer)?;
3025 for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
3026 short_channel_id.write(writer)?;
3027 (pending_forwards.len() as u64).write(writer)?;
3028 for forward in pending_forwards {
3029 forward.write(writer)?;
3033 (channel_state.claimable_htlcs.len() as u64).write(writer)?;
3034 for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
3035 payment_hash.write(writer)?;
3036 (previous_hops.len() as u64).write(writer)?;
3037 for previous_hop in previous_hops {
3038 previous_hop.write(writer)?;
3046 /// Arguments for the creation of a ChannelManager that are not deserialized.
3048 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation is:
3050 /// 1) Deserialize all stored ChannelMonitors.
3051 /// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
3052 /// ChannelManager)>::read(reader, args).
3053 /// This may result in closing some Channels if the ChannelMonitor is newer than the stored
3054 /// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcast.
3055 /// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
3056 /// ChannelMonitor::get_monitored_outpoints() and ChannelMonitor::get_funding_txo().
3057 /// 4) Reconnect blocks on your ChannelMonitors.
3058 /// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
3059 /// 6) Disconnect/connect blocks on the ChannelManager.
3060 /// 7) Register the new ChannelManager with your ChainWatchInterface (this does not happen
3061 /// automatically as it does in ChannelManager::new()).
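///
/// As a non-normative sketch of steps (1) and (2), assuming hypothetical
/// application-provided pieces (a `read_monitors_from_disk` helper, a `reader`,
/// and the usual Arc-wrapped interfaces); only ChannelManagerReadArgs and the
/// read() call come from this module:
///
/// ```ignore
/// let monitors: Vec<ChannelMonitor> = read_monitors_from_disk()?; // step 1
/// let mut channel_monitors = HashMap::new();
/// for monitor in monitors.iter() {
///     // Key each monitor by its funding outpoint, as channel_monitors expects.
///     channel_monitors.insert(monitor.get_funding_txo().unwrap(), monitor);
/// }
/// let args = ChannelManagerReadArgs {
///     keys_manager, fee_estimator, monitor, chain_monitor, tx_broadcaster, logger,
///     default_config: UserConfig::new(),
///     channel_monitors: &channel_monitors,
/// };
/// let (last_block_hash, channel_manager) =
///     <(Sha256dHash, ChannelManager)>::read(&mut reader, args)?; // step 2
/// // Steps 3-7 then proceed as described above against the returned manager.
/// ```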
3062 pub struct ChannelManagerReadArgs<'a> {
3063 /// The keys provider which will give us relevant keys. Some keys will be loaded during
3064 /// deserialization.
3065 pub keys_manager: Arc<KeysInterface>,
3067 /// The fee_estimator for use in the ChannelManager in the future.
3069 /// No calls to the FeeEstimator will be made during deserialization.
3070 pub fee_estimator: Arc<FeeEstimator>,
3071 /// The ManyChannelMonitor for use in the ChannelManager in the future.
3073 /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
3074 /// you have deserialized ChannelMonitors separately and will add them to your
3075 /// ManyChannelMonitor after deserializing this ChannelManager.
3076 pub monitor: Arc<ManyChannelMonitor>,
3077 /// The ChainWatchInterface for use in the ChannelManager in the future.
3079 /// No calls to the ChainWatchInterface will be made during deserialization.
3080 pub chain_monitor: Arc<ChainWatchInterface>,
3081 /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
3082 /// used to broadcast the latest local commitment transactions of channels which must be
3083 /// force-closed during deserialization.
3084 pub tx_broadcaster: Arc<BroadcasterInterface>,
3085 /// The Logger for use in the ChannelManager and which may be used to log information during
3086 /// deserialization.
3087 pub logger: Arc<Logger>,
3088 /// Default settings used for new channels. Any existing channels will continue to use the
3089 /// runtime settings which were stored when the ChannelManager was serialized.
3090 pub default_config: UserConfig,
3092 /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
3093 /// value.get_funding_txo() should be the key).
3095 /// If a monitor is inconsistent with the channel state during deserialization, the channel will
3096 /// be force-closed using the data in the ChannelMonitor and the Channel will be dropped; the same
3097 /// applies to monitors for which no channel data exists at all. If we find channel data for which
3098 /// the corresponding monitor is missing, Err(DecodeError::InvalidValue) will be returned.
3100 /// In such cases the latest local transactions will be sent to the tx_broadcaster included in this struct.
3102 pub channel_monitors: &'a HashMap<OutPoint, &'a ChannelMonitor>,
3105 impl<'a, R : ::std::io::Read> ReadableArgs<R, ChannelManagerReadArgs<'a>> for (Sha256dHash, ChannelManager) {
3106 fn read(reader: &mut R, args: ChannelManagerReadArgs<'a>) -> Result<Self, DecodeError> {
3107 let _ver: u8 = Readable::read(reader)?;
3108 let min_ver: u8 = Readable::read(reader)?;
3109 if min_ver > SERIALIZATION_VERSION {
3110 return Err(DecodeError::UnknownVersion);
3113 let genesis_hash: Sha256dHash = Readable::read(reader)?;
3114 let latest_block_height: u32 = Readable::read(reader)?;
3115 let last_block_hash: Sha256dHash = Readable::read(reader)?;
3117 let mut closed_channels = Vec::new();
3119 let channel_count: u64 = Readable::read(reader)?;
3120 let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
3121 let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
3122 let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
3123 for _ in 0..channel_count {
3124 let mut channel: Channel = ReadableArgs::read(reader, args.logger.clone())?;
3125 if channel.last_block_connected != last_block_hash {
3126 return Err(DecodeError::InvalidValue);
3129 let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
3130 funding_txo_set.insert(funding_txo.clone());
3131 if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
3132 if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
3133 channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
3134 channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
3135 let mut force_close_res = channel.force_shutdown();
3136 force_close_res.0 = monitor.get_latest_local_commitment_txn();
3137 closed_channels.push(force_close_res);
3139 if let Some(short_channel_id) = channel.get_short_channel_id() {
3140 short_to_id.insert(short_channel_id, channel.channel_id());
3142 by_id.insert(channel.channel_id(), channel);
3145 return Err(DecodeError::InvalidValue);
3149 for (ref funding_txo, ref monitor) in args.channel_monitors.iter() {
3150 if !funding_txo_set.contains(funding_txo) {
3151 closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
3155 let forward_htlcs_count: u64 = Readable::read(reader)?;
3156 let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
3157 for _ in 0..forward_htlcs_count {
3158 let short_channel_id = Readable::read(reader)?;
3159 let pending_forwards_count: u64 = Readable::read(reader)?;
3160 let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, 128));
3161 for _ in 0..pending_forwards_count {
3162 pending_forwards.push(Readable::read(reader)?);
3164 forward_htlcs.insert(short_channel_id, pending_forwards);
3167 let claimable_htlcs_count: u64 = Readable::read(reader)?;
3168 let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
3169 for _ in 0..claimable_htlcs_count {
3170 let payment_hash = Readable::read(reader)?;
3171 let previous_hops_len: u64 = Readable::read(reader)?;
3172 let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
3173 for _ in 0..previous_hops_len {
3174 previous_hops.push(Readable::read(reader)?);
3176 claimable_htlcs.insert(payment_hash, previous_hops);
3179 let channel_manager = ChannelManager {
3181 fee_estimator: args.fee_estimator,
3182 monitor: args.monitor,
3183 chain_monitor: args.chain_monitor,
3184 tx_broadcaster: args.tx_broadcaster,
3186 latest_block_height: AtomicUsize::new(latest_block_height as usize),
3187 last_block_hash: Mutex::new(last_block_hash),
3188 secp_ctx: Secp256k1::new(),
3190 channel_state: Mutex::new(ChannelHolder {
3193 next_forward: Instant::now(),
3196 pending_msg_events: Vec::new(),
3198 our_network_key: args.keys_manager.get_node_secret(),
3200 pending_events: Mutex::new(Vec::new()),
3201 total_consistency_lock: RwLock::new(()),
3202 keys_manager: args.keys_manager,
3203 logger: args.logger,
3204 default_configuration: args.default_config,
3207 for close_res in closed_channels.drain(..) {
3208 channel_manager.finish_force_close_channel(close_res);
3209 //TODO: Broadcast channel update for closed channels, but only after we've made a
3210 //connection or two.
3213 Ok((last_block_hash.clone(), channel_manager))
3219 use chain::chaininterface;
3220 use chain::transaction::OutPoint;
3221 use chain::chaininterface::{ChainListener, ChainWatchInterface};
3222 use chain::keysinterface::KeysInterface;
3223 use chain::keysinterface;
3224 use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder};
3225 use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor};
3226 use ln::router::{Route, RouteHop, Router};
3228 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
3229 use util::test_utils;
3230 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
3231 use util::errors::APIError;
3232 use util::logger::Logger;
3233 use util::ser::{Writeable, Writer, ReadableArgs};
3234 use util::config::UserConfig;
3236 use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
3237 use bitcoin::blockdata::block::{Block, BlockHeader};
3238 use bitcoin::blockdata::transaction::{Transaction, TxOut};
3239 use bitcoin::blockdata::constants::genesis_block;
3240 use bitcoin::network::constants::Network;
3244 use secp256k1::{Secp256k1, Message};
3245 use secp256k1::key::{PublicKey,SecretKey};
3247 use crypto::sha2::Sha256;
3248 use crypto::digest::Digest;
3250 use rand::{thread_rng,Rng};
3252 use std::cell::RefCell;
3253 use std::collections::{BTreeSet, HashMap};
3254 use std::default::Default;
3256 use std::sync::{Arc, Mutex};
3257 use std::sync::atomic::Ordering;
3258 use std::time::Instant;
3261 fn build_test_onion_keys() -> Vec<OnionKeys> {
3262 // Keys from BOLT 4, used in both test vector tests
3263 let secp_ctx = Secp256k1::new();
3268 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
3269 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, we fill in payloads manually
3272 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
3273 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, we fill in payloads manually
3276 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
3277 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, we fill in payloads manually
3280 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]).unwrap(),
3281 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, we fill in payloads manually
3284 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()[..]).unwrap(),
3285 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, we fill in payloads manually
3290 let session_priv = SecretKey::from_slice(&secp_ctx, &hex::decode("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap();
3292 let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
3293 assert_eq!(onion_keys.len(), route.hops.len());
3298 fn onion_vectors() {
3299 // Packet creation test vectors from BOLT 4
3300 let onion_keys = build_test_onion_keys();
3302 assert_eq!(onion_keys[0].shared_secret[..], hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
3303 assert_eq!(onion_keys[0].blinding_factor[..], hex::decode("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
3304 assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
3305 assert_eq!(onion_keys[0].rho, hex::decode("ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986").unwrap()[..]);
3306 assert_eq!(onion_keys[0].mu, hex::decode("b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba").unwrap()[..]);
3308 assert_eq!(onion_keys[1].shared_secret[..], hex::decode("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
3309 assert_eq!(onion_keys[1].blinding_factor[..], hex::decode("bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f").unwrap()[..]);
3310 assert_eq!(onion_keys[1].ephemeral_pubkey.serialize()[..], hex::decode("028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2").unwrap()[..]);
3311 assert_eq!(onion_keys[1].rho, hex::decode("450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59").unwrap()[..]);
3312 assert_eq!(onion_keys[1].mu, hex::decode("05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9").unwrap()[..]);
3314 assert_eq!(onion_keys[2].shared_secret[..], hex::decode("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
3315 assert_eq!(onion_keys[2].blinding_factor[..], hex::decode("a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5").unwrap()[..]);
3316 assert_eq!(onion_keys[2].ephemeral_pubkey.serialize()[..], hex::decode("03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0").unwrap()[..]);
3317 assert_eq!(onion_keys[2].rho, hex::decode("11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea").unwrap()[..]);
3318 assert_eq!(onion_keys[2].mu, hex::decode("caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78").unwrap()[..]);
3320 assert_eq!(onion_keys[3].shared_secret[..], hex::decode("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
3321 assert_eq!(onion_keys[3].blinding_factor[..], hex::decode("7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262").unwrap()[..]);
3322 assert_eq!(onion_keys[3].ephemeral_pubkey.serialize()[..], hex::decode("031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595").unwrap()[..]);
3323 assert_eq!(onion_keys[3].rho, hex::decode("cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e").unwrap()[..]);
3324 assert_eq!(onion_keys[3].mu, hex::decode("5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9").unwrap()[..]);
3326 assert_eq!(onion_keys[4].shared_secret[..], hex::decode("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
3327 assert_eq!(onion_keys[4].blinding_factor[..], hex::decode("c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205").unwrap()[..]);
3328 assert_eq!(onion_keys[4].ephemeral_pubkey.serialize()[..], hex::decode("03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4").unwrap()[..]);
3329 assert_eq!(onion_keys[4].rho, hex::decode("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
3330 assert_eq!(onion_keys[4].mu, hex::decode("8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a").unwrap()[..]);
3332 // Test vectors below are flat-out wrong: they claim to set outgoing_cltv_value to non-0 :/
3333 let payloads = vec!(
3334 msgs::OnionHopData {
3336 data: msgs::OnionRealm0HopData {
3337 short_channel_id: 0,
3339 outgoing_cltv_value: 0,
3343 msgs::OnionHopData {
3345 data: msgs::OnionRealm0HopData {
3346 short_channel_id: 0x0101010101010101,
3347 amt_to_forward: 0x0100000001,
3348 outgoing_cltv_value: 0,
3352 msgs::OnionHopData {
3354 data: msgs::OnionRealm0HopData {
3355 short_channel_id: 0x0202020202020202,
3356 amt_to_forward: 0x0200000002,
3357 outgoing_cltv_value: 0,
3361 msgs::OnionHopData {
3363 data: msgs::OnionRealm0HopData {
3364 short_channel_id: 0x0303030303030303,
3365 amt_to_forward: 0x0300000003,
3366 outgoing_cltv_value: 0,
3370 msgs::OnionHopData {
3372 data: msgs::OnionRealm0HopData {
3373 short_channel_id: 0x0404040404040404,
3374 amt_to_forward: 0x0400000004,
3375 outgoing_cltv_value: 0,
3381 let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]);
3382 // Just check the final packet encoding, as it includes all the per-hop vectors in it
3384 assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
3388 fn test_failure_packet_onion() {
3389 // Returning Errors test vectors from BOLT 4
3391 let onion_keys = build_test_onion_keys();
3392 let onion_error = ChannelManager::build_failure_packet(&onion_keys[4].shared_secret[..], 0x2002, &[0; 0]);
3393 assert_eq!(onion_error.encode(), hex::decode("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap());
3395 let onion_packet_1 = ChannelManager::encrypt_failure_packet(&onion_keys[4].shared_secret[..], &onion_error.encode()[..]);
3396 assert_eq!(onion_packet_1.data, hex::decode("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap());
3398 let onion_packet_2 = ChannelManager::encrypt_failure_packet(&onion_keys[3].shared_secret[..], &onion_packet_1.data[..]);
3399 assert_eq!(onion_packet_2.data, hex::decode("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap());
3401 let onion_packet_3 = ChannelManager::encrypt_failure_packet(&onion_keys[2].shared_secret[..], &onion_packet_2.data[..]);
3402 assert_eq!(onion_packet_3.data, hex::decode("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap());
3404 let onion_packet_4 = ChannelManager::encrypt_failure_packet(&onion_keys[1].shared_secret[..], &onion_packet_3.data[..]);
3405 assert_eq!(onion_packet_4.data, hex::decode("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap());
3407 let onion_packet_5 = ChannelManager::encrypt_failure_packet(&onion_keys[0].shared_secret[..], &onion_packet_4.data[..]);
3408 assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
3411 fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
3412 assert!(chain.does_match_tx(tx));
3413 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3414 chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
3415 for i in 2..100 { // connect further blocks so the funding transaction gains confirmations
3416 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3417 chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
3422 chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
3423 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
3424 chan_monitor: Arc<test_utils::TestChannelMonitor>,
3425 node: Arc<ChannelManager>,
3427 node_seed: [u8; 32],
3428 network_payment_count: Rc<RefCell<u8>>,
3429 network_chan_count: Rc<RefCell<u32>>,
3431 impl Drop for Node {
3432 fn drop(&mut self) {
3433 if !::std::thread::panicking() {
3434 // Check that we processed all pending events
3435 assert_eq!(self.node.get_and_clear_pending_msg_events().len(), 0);
3436 assert_eq!(self.node.get_and_clear_pending_events().len(), 0);
3437 assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0);
3442 fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3443 create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
3446 fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3447 let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
3448 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
3449 (announcement, as_update, bs_update, channel_id, tx)
3452 macro_rules! get_revoke_commit_msgs {
3453 ($node: expr, $node_id: expr) => {
3455 let events = $node.node.get_and_clear_pending_msg_events();
3456 assert_eq!(events.len(), 2);
3458 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
3459 assert_eq!(*node_id, $node_id);
3462 _ => panic!("Unexpected event"),
3463 }, match events[1] {
3464 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3465 assert_eq!(*node_id, $node_id);
3466 assert!(updates.update_add_htlcs.is_empty());
3467 assert!(updates.update_fulfill_htlcs.is_empty());
3468 assert!(updates.update_fail_htlcs.is_empty());
3469 assert!(updates.update_fail_malformed_htlcs.is_empty());
3470 assert!(updates.update_fee.is_none());
3471 updates.commitment_signed.clone()
3473 _ => panic!("Unexpected event"),
3479 macro_rules! get_event_msg {
3480 ($node: expr, $event_type: path, $node_id: expr) => {
3482 let events = $node.node.get_and_clear_pending_msg_events();
3483 assert_eq!(events.len(), 1);
3485 $event_type { ref node_id, ref msg } => {
3486 assert_eq!(*node_id, $node_id);
3489 _ => panic!("Unexpected event"),
3495 macro_rules! get_htlc_update_msgs {
3496 ($node: expr, $node_id: expr) => {
3498 let events = $node.node.get_and_clear_pending_msg_events();
3499 assert_eq!(events.len(), 1);
3501 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3502 assert_eq!(*node_id, $node_id);
3505 _ => panic!("Unexpected event"),
3511 fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> Transaction {
3512 node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
3513 node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
3514 node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
3516 let chan_id = *node_a.network_chan_count.borrow();
3520 let events_2 = node_a.node.get_and_clear_pending_events();
3521 assert_eq!(events_2.len(), 1);
3523 Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
3524 assert_eq!(*channel_value_satoshis, channel_value);
3525 assert_eq!(user_channel_id, 42);
3527 tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
3528 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
3530 funding_output = OutPoint::new(tx.txid(), 0);
3532 node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
3533 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
3534 assert_eq!(added_monitors.len(), 1);
3535 assert_eq!(added_monitors[0].0, funding_output);
3536 added_monitors.clear();
3538 _ => panic!("Unexpected event"),
3541 node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap();
3543 let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
3544 assert_eq!(added_monitors.len(), 1);
3545 assert_eq!(added_monitors[0].0, funding_output);
3546 added_monitors.clear();
3549 node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())).unwrap();
3551 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
3552 assert_eq!(added_monitors.len(), 1);
3553 assert_eq!(added_monitors[0].0, funding_output);
3554 added_monitors.clear();
3557 let events_4 = node_a.node.get_and_clear_pending_events();
3558 assert_eq!(events_4.len(), 1);
3560 Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
3561 assert_eq!(user_channel_id, 42);
3562 assert_eq!(*funding_txo, funding_output);
3564 _ => panic!("Unexpected event"),
3570 fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
3571 confirm_transaction(&node_b.chain_monitor, &tx, tx.version);
3572 node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingLocked, node_a.node.get_our_node_id())).unwrap();
3576 confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
3577 let events_6 = node_a.node.get_and_clear_pending_msg_events();
3578 assert_eq!(events_6.len(), 2);
3579 ((match events_6[0] {
3580 MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
3581 channel_id = msg.channel_id.clone();
3582 assert_eq!(*node_id, node_b.node.get_our_node_id());
3585 _ => panic!("Unexpected event"),
3586 }, match events_6[1] {
3587 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
3588 assert_eq!(*node_id, node_b.node.get_our_node_id());
3591 _ => panic!("Unexpected event"),
3595 fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
3596 let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
3597 let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
3601 fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
3602 node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap();
3603 let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
3604 node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
3606 let events_7 = node_b.node.get_and_clear_pending_msg_events();
3607 assert_eq!(events_7.len(), 1);
3608 let (announcement, bs_update) = match events_7[0] {
3609 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
3612 _ => panic!("Unexpected event"),
3615 node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
3616 let events_8 = node_a.node.get_and_clear_pending_msg_events();
3617 assert_eq!(events_8.len(), 1);
3618 let as_update = match events_8[0] {
3619 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
3620 assert!(*announcement == *msg);
3623 _ => panic!("Unexpected event"),
3626 *node_a.network_chan_count.borrow_mut() += 1;
3628 ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
3631 fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3632 create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
3635 fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
3636 let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
3638 assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
3639 node.router.handle_channel_update(&chan_announcement.1).unwrap();
3640 node.router.handle_channel_update(&chan_announcement.2).unwrap();
3642 (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
3645 macro_rules! check_spends {
3646 ($tx: expr, $spends_tx: expr) => {
3648 let mut funding_tx_map = HashMap::new();
3649 let spends_tx = $spends_tx;
3650 funding_tx_map.insert(spends_tx.txid(), spends_tx);
3651 $tx.verify(&funding_tx_map).unwrap();
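// Expects the node's last message event to be a BroadcastChannelUpdate with the
// disabled bit (flags & 2) set, optionally preceded by a SendClosingSigned to
// $dest_pubkey; yields (channel_update, Option<closing_signed>).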
3656 macro_rules! get_closing_signed_broadcast {
3657 ($node: expr, $dest_pubkey: expr) => {
3659 let events = $node.get_and_clear_pending_msg_events();
3660 assert!(events.len() == 1 || events.len() == 2);
3661 (match events[events.len() - 1] {
3662 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
3663 assert_eq!(msg.contents.flags & 2, 2);
3666 _ => panic!("Unexpected event"),
3667 }, if events.len() == 2 {
3669 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
3670 assert_eq!(*node_id, $dest_pubkey);
3673 _ => panic!("Unexpected event"),
3680 fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
3681 let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
3682 let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
3685 node_a.close_channel(channel_id).unwrap();
3686 node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id())).unwrap();
3688 let events_1 = node_b.get_and_clear_pending_msg_events();
3689 assert!(events_1.len() >= 1);
3690 let shutdown_b = match events_1[0] {
3691 MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
3692 assert_eq!(node_id, &node_a.get_our_node_id());
3695 _ => panic!("Unexpected event"),
3698 let closing_signed_b = if !close_inbound_first {
3699 assert_eq!(events_1.len(), 1);
3702 Some(match events_1[1] {
3703 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
3704 assert_eq!(node_id, &node_a.get_our_node_id());
3707 _ => panic!("Unexpected event"),
3711 node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
3712 let (as_update, bs_update) = if close_inbound_first {
3713 assert!(node_a.get_and_clear_pending_msg_events().is_empty());
3714 node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
3715 assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
3716 tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
3717 let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
3719 node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
3720 let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
3721 assert!(none_b.is_none());
3722 assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
3723 tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
3724 (as_update, bs_update)
3726 let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
3728 node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a).unwrap();
3729 assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
3730 tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
3731 let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
3733 node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
3734 let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
3735 assert!(none_a.is_none());
3736 assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
3737 tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
3738 (as_update, bs_update)
3740 assert_eq!(tx_a, tx_b);
3741 check_spends!(tx_a, funding_tx);
3743 (as_update, bs_update)
3748 msgs: Vec<msgs::UpdateAddHTLC>,
3749 commitment_msg: msgs::CommitmentSigned,
3752 fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
3753 assert!(updates.update_fulfill_htlcs.is_empty());
3754 assert!(updates.update_fail_htlcs.is_empty());
3755 assert!(updates.update_fail_malformed_htlcs.is_empty());
3756 assert!(updates.update_fee.is_none());
3757 SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
3760 fn from_event(event: MessageSendEvent) -> SendEvent {
3762 MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
3763 _ => panic!("Unexpected event type!"),
3768 macro_rules! check_added_monitors {
3769 ($node: expr, $count: expr) => {
3771 let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
3772 assert_eq!(added_monitors.len(), $count);
3773 added_monitors.clear();
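// Drives a commitment update through the full handshake: $node_a handles the
// given commitment_signed, replies with revoke_and_ack plus its own
// commitment_signed, $node_b handles both and returns a final revoke_and_ack
// which $node_a consumes, with monitor-update counts asserted at each step.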
3778 macro_rules! commitment_signed_dance {
3779 ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
3781 check_added_monitors!($node_a, 0);
3782 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
3783 $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
3784 check_added_monitors!($node_a, 1);
3785 commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
3788 ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
3790 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
3791 check_added_monitors!($node_b, 0);
3792 assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
3793 $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
3794 assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
3795 check_added_monitors!($node_b, 1);
3796 $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap();
3797 let (bs_revoke_and_ack, extra_msg_option) = {
3798 let events = $node_b.node.get_and_clear_pending_msg_events();
3799 assert!(events.len() <= 2);
3801 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
3802 assert_eq!(*node_id, $node_a.node.get_our_node_id());
3805 _ => panic!("Unexpected event"),
3806 }, events.get(1).map(|e| e.clone()))
3808 check_added_monitors!($node_b, 1);
3809 if $fail_backwards {
3810 assert!($node_a.node.get_and_clear_pending_events().is_empty());
3811 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
3813 $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
3815 let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
3816 if $fail_backwards {
3817 assert_eq!(added_monitors.len(), 2);
3818 assert!(added_monitors[0].0 != added_monitors[1].0);
3820 assert_eq!(added_monitors.len(), 1);
3822 added_monitors.clear();
3827 ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
3829 assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
3832 ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
3834 commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
3835 if $fail_backwards {
3836 let channel_state = $node_a.node.channel_state.lock().unwrap();
3837 assert_eq!(channel_state.pending_msg_events.len(), 1);
3838 if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
3839 assert_ne!(*node_id, $node_b.node.get_our_node_id());
3840 } else { panic!("Unexpected event"); }
3842 assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
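// Produces a fresh (payment_preimage, payment_hash) pair: the preimage is the
// current network-wide payment counter repeated as 32 bytes, and the hash is
// SHA256(preimage).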
3848 macro_rules! get_payment_preimage_hash {
3851 let payment_preimage = [*$node.network_payment_count.borrow(); 32];
3852 *$node.network_payment_count.borrow_mut() += 1;
3853 let mut payment_hash = [0; 32];
3854 let mut sha = Sha256::new();
3855 sha.input(&payment_preimage[..]);
3856 sha.result(&mut payment_hash);
3857 (payment_preimage, payment_hash)
3862 fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
3863 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
3865 let mut payment_event = {
3866 origin_node.node.send_payment(route, our_payment_hash).unwrap();
3867 check_added_monitors!(origin_node, 1);
3869 let mut events = origin_node.node.get_and_clear_pending_msg_events();
3870 assert_eq!(events.len(), 1);
3871 SendEvent::from_event(events.remove(0))
3873 let mut prev_node = origin_node;
3875 for (idx, &node) in expected_route.iter().enumerate() {
3876 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
3878 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3879 check_added_monitors!(node, 0);
3880 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
3882 let events_1 = node.node.get_and_clear_pending_events();
3883 assert_eq!(events_1.len(), 1);
3885 Event::PendingHTLCsForwardable { .. } => { },
3886 _ => panic!("Unexpected event"),
3889 node.node.channel_state.lock().unwrap().next_forward = Instant::now();
3890 node.node.process_pending_htlc_forwards();
3892 if idx == expected_route.len() - 1 {
3893 let events_2 = node.node.get_and_clear_pending_events();
3894 assert_eq!(events_2.len(), 1);
3896 Event::PaymentReceived { ref payment_hash, amt } => {
3897 assert_eq!(our_payment_hash, *payment_hash);
3898 assert_eq!(amt, recv_value);
3900 _ => panic!("Unexpected event"),
3903 let mut events_2 = node.node.get_and_clear_pending_msg_events();
3904 assert_eq!(events_2.len(), 1);
3905 check_added_monitors!(node, 1);
3906 payment_event = SendEvent::from_event(events_2.remove(0));
3907 assert_eq!(payment_event.msgs.len(), 1);
3913 (our_payment_preimage, our_payment_hash)
3916 fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) {
3917 assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
3918 check_added_monitors!(expected_route.last().unwrap(), 1);
3920 let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
3921 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
3922 macro_rules! get_next_msgs {
3925 let events = $node.node.get_and_clear_pending_msg_events();
3926 assert_eq!(events.len(), 1);
3928 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
3929 assert!(update_add_htlcs.is_empty());
3930 assert_eq!(update_fulfill_htlcs.len(), 1);
3931 assert!(update_fail_htlcs.is_empty());
3932 assert!(update_fail_malformed_htlcs.is_empty());
3933 assert!(update_fee.is_none());
3934 expected_next_node = node_id.clone();
3935 Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()))
3937 _ => panic!("Unexpected event"),
3943 macro_rules! last_update_fulfill_dance {
3944 ($node: expr, $prev_node: expr) => {
3946 $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3947 check_added_monitors!($node, 0);
3948 assert!($node.node.get_and_clear_pending_msg_events().is_empty());
3949 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
3953 macro_rules! mid_update_fulfill_dance {
3954 ($node: expr, $prev_node: expr, $new_msgs: expr) => {
3956 $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3957 check_added_monitors!($node, 1);
3958 let new_next_msgs = if $new_msgs {
3959 get_next_msgs!($node)
3961 assert!($node.node.get_and_clear_pending_msg_events().is_empty());
3964 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
3965 next_msgs = new_next_msgs;
3970 let mut prev_node = expected_route.last().unwrap();
3971 for (idx, node) in expected_route.iter().rev().enumerate() {
3972 assert_eq!(expected_next_node, node.node.get_our_node_id());
3973 let update_next_msgs = !skip_last || idx != expected_route.len() - 1;
3974 if next_msgs.is_some() {
3975 mid_update_fulfill_dance!(node, prev_node, update_next_msgs);
3976 } else if update_next_msgs {
3977 next_msgs = get_next_msgs!(node);
3979 assert!(node.node.get_and_clear_pending_msg_events().is_empty());
3981 if !skip_last && idx == expected_route.len() - 1 {
3982 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
3989 last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
3990 let events = origin_node.node.get_and_clear_pending_events();
3991 assert_eq!(events.len(), 1);
3993 Event::PaymentSent { payment_preimage } => {
3994 assert_eq!(payment_preimage, our_payment_preimage);
3996 _ => panic!("Unexpected event"),
4001 fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) {
4002 claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage);
4005 const TEST_FINAL_CLTV: u32 = 32;
4007 fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
4008 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
4009 assert_eq!(route.hops.len(), expected_route.len());
4010 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
4011 assert_eq!(hop.pubkey, node.node.get_our_node_id());
4014 send_along_route(origin_node, route, expected_route, recv_value)
4017 fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
4018 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
4019 assert_eq!(route.hops.len(), expected_route.len());
4020 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
4021 assert_eq!(hop.pubkey, node.node.get_our_node_id());
4024 let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
4026 let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
4028 APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
4029 _ => panic!("Unknown error variants"),
4033 fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
4034 let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
4035 claim_payment(&origin, expected_route, our_payment_preimage);
4038 fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) {
4039 assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, PaymentFailReason::PreimageUnknown));
4040 check_added_monitors!(expected_route.last().unwrap(), 1);
4042 let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
4043 macro_rules! update_fail_dance {
4044 ($node: expr, $prev_node: expr, $last_node: expr) => {
4046 $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
4047 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
4052 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
4053 let mut prev_node = expected_route.last().unwrap();
4054 for (idx, node) in expected_route.iter().rev().enumerate() {
4055 assert_eq!(expected_next_node, node.node.get_our_node_id());
4056 if next_msgs.is_some() {
4057 // We may be the "last node" for the purpose of the commitment dance if we're
4058 // skipping the last node (implying it is disconnected) and we're the
4059 // second-to-last node!
4060 update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
4063 let events = node.node.get_and_clear_pending_msg_events();
4064 if !skip_last || idx != expected_route.len() - 1 {
4065 assert_eq!(events.len(), 1);
4067 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4068 assert!(update_add_htlcs.is_empty());
4069 assert!(update_fulfill_htlcs.is_empty());
4070 assert_eq!(update_fail_htlcs.len(), 1);
4071 assert!(update_fail_malformed_htlcs.is_empty());
4072 assert!(update_fee.is_none());
4073 expected_next_node = node_id.clone();
4074 next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
4076 _ => panic!("Unexpected event"),
4079 assert!(events.is_empty());
4081 if !skip_last && idx == expected_route.len() - 1 {
4082 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
4089 update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
4091 let events = origin_node.node.get_and_clear_pending_events();
4092 assert_eq!(events.len(), 1);
4094 Event::PaymentFailed { payment_hash, rejected_by_dest } => {
4095 assert_eq!(payment_hash, our_payment_hash);
4096 assert!(rejected_by_dest);
4098 _ => panic!("Unexpected event"),
4103 fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) {
4104 fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
4107 fn create_network(node_count: usize) -> Vec<Node> {
4108 let mut nodes = Vec::new();
4109 let mut rng = thread_rng();
4110 let secp_ctx = Secp256k1::new();
4111 let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
4113 let chan_count = Rc::new(RefCell::new(0));
4114 let payment_count = Rc::new(RefCell::new(0));
4116 for _ in 0..node_count {
4117 let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
4118 let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
4119 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
4120 let mut seed = [0; 32];
4121 rng.fill_bytes(&mut seed);
4122 let keys_manager = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger)));
4123 let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone()));
4124 let mut config = UserConfig::new();
4125 config.channel_options.announced_channel = true;
4126 config.channel_limits.force_announced_channel_preference = false;
4127 let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap();
4128 let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
4129 nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, node_seed: seed,
4130 network_payment_count: payment_count.clone(),
4131 network_chan_count: chan_count.clone(),
4139 fn test_async_inbound_update_fee() {
4140 let mut nodes = create_network(2);
4141 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4142 let channel_id = chan.2;
4144 macro_rules! get_feerate {
4146 let chan_lock = $node.node.channel_state.lock().unwrap();
4147 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4153 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4157 // send (1) commitment_signed -.
4158 // <- update_add_htlc/commitment_signed
4159 // send (2) RAA (awaiting remote revoke) -.
4160 // (1) commitment_signed is delivered ->
4161 // .- send (3) RAA (awaiting remote revoke)
4162 // (2) RAA is delivered ->
4163 // .- send (4) commitment_signed
4164 // <- (3) RAA is delivered
4165 // send (5) commitment_signed -.
4166 // <- (4) commitment_signed is delivered
4168 // (5) commitment_signed is delivered ->
4170 // (6) RAA is delivered ->
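// Both sides thus have a commitment_signed outstanding at the same time, and each must hold
// back its next update until the matching revoke_and_ack comes in.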
4172 // First nodes[0] generates an update_fee
4173 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
4174 check_added_monitors!(nodes[0], 1);
4176 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4177 assert_eq!(events_0.len(), 1);
4178 let (update_msg, commitment_signed) = match events_0[0] { // (1)
4179 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
4180 (update_fee.as_ref(), commitment_signed)
4182 _ => panic!("Unexpected event"),
4185 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4187 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
4188 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4189 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
4190 check_added_monitors!(nodes[1], 1);
4192 let payment_event = {
4193 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
4194 assert_eq!(events_1.len(), 1);
4195 SendEvent::from_event(events_1.remove(0))
4197 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
4198 assert_eq!(payment_event.msgs.len(), 1);
4200 // ...now when the messages get delivered everyone should be happy
4201 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4202 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
4203 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4204 // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
4205 check_added_monitors!(nodes[0], 1);
4207 // deliver (1), generate (3):
4208 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4209 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4210 // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
4211 check_added_monitors!(nodes[1], 1);
4213 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); // deliver (2)
4214 let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4215 assert!(bs_update.update_add_htlcs.is_empty()); // (4)
4216 assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
4217 assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
4218 assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
4219 assert!(bs_update.update_fee.is_none()); // (4)
4220 check_added_monitors!(nodes[1], 1);
4222 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); // deliver (3)
4223 let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4224 assert!(as_update.update_add_htlcs.is_empty()); // (5)
4225 assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
4226 assert!(as_update.update_fail_htlcs.is_empty()); // (5)
4227 assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
4228 assert!(as_update.update_fee.is_none()); // (5)
4229 check_added_monitors!(nodes[0], 1);
4231 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap(); // deliver (4)
4232 let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4233 // only (6) so get_event_msg's assert(len == 1) passes
4234 check_added_monitors!(nodes[0], 1);
4236 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed).unwrap(); // deliver (5)
4237 let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4238 check_added_monitors!(nodes[1], 1);
4240 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
4241 check_added_monitors!(nodes[0], 1);
4243 let events_2 = nodes[0].node.get_and_clear_pending_events();
4244 assert_eq!(events_2.len(), 1);
4246 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
4247 _ => panic!("Unexpected event"),
4250 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); // deliver (6)
4251 check_added_monitors!(nodes[1], 1);
4255 fn test_update_fee_unordered_raa() {
4256 // Just the intro to the previous test followed by an out-of-order RAA (which caused a
4257 // crash in an earlier version of the update_fee patch)
4258 let mut nodes = create_network(2);
4259 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4260 let channel_id = chan.2;
4262 macro_rules! get_feerate {
4264 let chan_lock = $node.node.channel_state.lock().unwrap();
4265 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4271 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4273 // First nodes[0] generates an update_fee
4274 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
4275 check_added_monitors!(nodes[0], 1);
4277 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4278 assert_eq!(events_0.len(), 1);
4279 let update_msg = match events_0[0] { // (1)
4280 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
4283 _ => panic!("Unexpected event"),
4286 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4288 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
4289 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4290 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
4291 check_added_monitors!(nodes[1], 1);
4293 let payment_event = {
4294 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
4295 assert_eq!(events_1.len(), 1);
4296 SendEvent::from_event(events_1.remove(0))
4298 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
4299 assert_eq!(payment_event.msgs.len(), 1);
4301 // ...now when the messages get delivered everyone should be happy
4302 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4303 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
4304 let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4305 // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
4306 check_added_monitors!(nodes[0], 1);
4308 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2)
4309 check_added_monitors!(nodes[1], 1);
4311 // We can't continue, sadly, because our (1) now has a bogus signature
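// (Out-of-order delivery advanced the commitment state, so the signature sent in (1) no
// longer matches the commitment transaction nodes[1] would build.)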
4315 fn test_multi_flight_update_fee() {
4316 let nodes = create_network(2);
4317 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4318 let channel_id = chan.2;
4320 macro_rules! get_feerate {
4322 let chan_lock = $node.node.channel_state.lock().unwrap();
4323 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4329 // update_fee/commitment_signed ->
4330 // .- send (1) RAA and (2) commitment_signed
4331 // update_fee (never committed) ->
4332 // (3) update_fee ->
4333 // We have to manually generate the above update_fee: it is allowed by the protocol, but we
4334 // don't track which updates correspond to which revoke_and_ack responses, so nodes[0] is in
4335 // AwaitingRAA mode and will not generate the update_fee itself yet.
4336 // <- (1) RAA delivered
4337 // (3) is generated and send (4) CS -.
4338 // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
4339 // know the per_commitment_point to use for it.
4340 // <- (2) commitment_signed delivered
4341 // revoke_and_ack ->
4342 // B should send no response here
4343 // (4) commitment_signed delivered ->
4344 // <- RAA/commitment_signed delivered
4345 // revoke_and_ack ->
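// In short: while nodes[0] awaits the RAA it may overwrite its queued update_fee repeatedly,
// but only the last value survives; (3)/(4) must commit exactly that final feerate.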
4347 // First nodes[0] generates an update_fee
4348 let initial_feerate = get_feerate!(nodes[0]);
4349 nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
4350 check_added_monitors!(nodes[0], 1);
4352 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4353 assert_eq!(events_0.len(), 1);
4354 let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
4355 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
4356 (update_fee.as_ref().unwrap(), commitment_signed)
4358 _ => panic!("Unexpected event"),
4361 // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
4362 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
4363 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
4364 let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4365 check_added_monitors!(nodes[1], 1);
4367 // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
4369 nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
4370 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4371 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4373 // Manually construct the (3) update_fee message that nodes[0] will generate later, before it does so...
4374 let mut update_msg_2 = msgs::UpdateFee {
4375 channel_id: update_msg_1.channel_id.clone(),
4376 feerate_per_kw: (initial_feerate + 30) as u32,
4379 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
4381 update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
4383 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
4385 // Deliver (1), generating (3) and (4)
4386 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
4387 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4388 check_added_monitors!(nodes[0], 1);
4389 assert!(as_second_update.update_add_htlcs.is_empty());
4390 assert!(as_second_update.update_fulfill_htlcs.is_empty());
4391 assert!(as_second_update.update_fail_htlcs.is_empty());
4392 assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
4393 // Check that the newly generated update_fee matches what we delivered:
4394 assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
4395 assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
4397 // Deliver (2) commitment_signed
4398 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
4399 let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4400 check_added_monitors!(nodes[0], 1);
4401 // No commitment_signed so get_event_msg's assert(len == 1) passes
4403 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap();
4404 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4405 check_added_monitors!(nodes[1], 1);
4408 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed).unwrap();
4409 let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4410 check_added_monitors!(nodes[1], 1);
4412 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
4413 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4414 check_added_monitors!(nodes[0], 1);
4416 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment).unwrap();
4417 let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4418 // No commitment_signed so get_event_msg's assert(len == 1) passes
4419 check_added_monitors!(nodes[0], 1);
4421 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap();
4422 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4423 check_added_monitors!(nodes[1], 1);
4427 fn test_update_fee_vanilla() {
4428 let nodes = create_network(2);
4429 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4430 let channel_id = chan.2;
4432 macro_rules! get_feerate {
4434 let chan_lock = $node.node.channel_state.lock().unwrap();
4435 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4440 let feerate = get_feerate!(nodes[0]);
4441 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
4442 check_added_monitors!(nodes[0], 1);
4444 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4445 assert_eq!(events_0.len(), 1);
4446 let (update_msg, commitment_signed) = match events_0[0] {
4447 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4448 (update_fee.as_ref(), commitment_signed)
4450 _ => panic!("Unexpected event"),
4452 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4454 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4455 let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4456 check_added_monitors!(nodes[1], 1);
4458 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4459 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4460 check_added_monitors!(nodes[0], 1);
4462 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
4463 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4464 // No commitment_signed so get_event_msg's assert(len == 1) passes
4465 check_added_monitors!(nodes[0], 1);
4467 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
4468 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4469 check_added_monitors!(nodes[1], 1);
4473 fn test_update_fee_with_fundee_update_add_htlc() {
4474 let mut nodes = create_network(2);
4475 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4476 let channel_id = chan.2;
4478 macro_rules! get_feerate {
4480 let chan_lock = $node.node.channel_state.lock().unwrap();
4481 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4487 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4489 let feerate = get_feerate!(nodes[0]);
4490 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
4491 check_added_monitors!(nodes[0], 1);
4493 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4494 assert_eq!(events_0.len(), 1);
4495 let (update_msg, commitment_signed) = match events_0[0] {
4496 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4497 (update_fee.as_ref(), commitment_signed)
4499 _ => panic!("Unexpected event"),
4501 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4502 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4503 let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4504 check_added_monitors!(nodes[1], 1);
4506 let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap();
4508 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]);
4510 // nothing happens since nodes[1] is in AwaitingRemoteRevoke
4511 nodes[1].node.send_payment(route, our_payment_hash).unwrap();
4513 let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
4514 assert_eq!(added_monitors.len(), 0);
4515 added_monitors.clear();
4517 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4518 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4519 // nodes[1] has nothing to do
4521 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4522 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4523 check_added_monitors!(nodes[0], 1);
4525 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
4526 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4527 // No commitment_signed so get_event_msg's assert(len == 1) passes
4528 check_added_monitors!(nodes[0], 1);
4529 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
4530 check_added_monitors!(nodes[1], 1);
4531 // AwaitingRemoteRevoke ends here
4533 let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4534 assert_eq!(commitment_update.update_add_htlcs.len(), 1);
4535 assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
4536 assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
4537 assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
4538 assert_eq!(commitment_update.update_fee.is_none(), true);
4540 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap();
4541 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
4542 check_added_monitors!(nodes[0], 1);
4543 let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4545 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap();
4546 check_added_monitors!(nodes[1], 1);
4547 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4549 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap();
4550 check_added_monitors!(nodes[1], 1);
4551 let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4552 // No commitment_signed so get_event_msg's assert(len == 1) passes
4554 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap();
4555 check_added_monitors!(nodes[0], 1);
4556 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4558 let events = nodes[0].node.get_and_clear_pending_events();
4559 assert_eq!(events.len(), 1);
4561 Event::PendingHTLCsForwardable { .. } => { },
4562 _ => panic!("Unexpected event"),
4564 nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
4565 nodes[0].node.process_pending_htlc_forwards();
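// Setting next_forward to now bypasses the usual batching delay so the pending HTLC is
// processed (here: received) immediately.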
4567 let events = nodes[0].node.get_and_clear_pending_events();
4568 assert_eq!(events.len(), 1);
4570 Event::PaymentReceived { .. } => { },
4571 _ => panic!("Unexpected event"),
4574 claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
4576 send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
4577 send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
4578 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
4582 fn test_update_fee() {
4583 let nodes = create_network(2);
4584 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
4585 let channel_id = chan.2;
4587 macro_rules! get_feerate {
4589 let chan_lock = $node.node.channel_state.lock().unwrap();
4590 let chan = chan_lock.by_id.get(&channel_id).unwrap();
4596 // (1) update_fee/commitment_signed ->
4597 // <- (2) revoke_and_ack
4598 // .- send (3) commitment_signed
4599 // (4) update_fee/commitment_signed ->
4600 // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
4601 // <- (3) commitment_signed delivered
4602 // send (6) revoke_and_ack -.
4603 // <- (5) deliver revoke_and_ack
4604 // (6) deliver revoke_and_ack ->
4605 // .- send (7) commitment_signed in response to (4)
4606 // <- (7) deliver commitment_signed
4607 // revoke_and_ack ->
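// i.e. nodes[0] pipelines a second fee update (4) while (1) is still mid-dance; once all
// messages land, both sides must agree on feerate + 30.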
4609 // Create and deliver (1)...
4610 let feerate = get_feerate!(nodes[0]);
4611 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
4612 check_added_monitors!(nodes[0], 1);
4614 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4615 assert_eq!(events_0.len(), 1);
4616 let (update_msg, commitment_signed) = match events_0[0] {
4617 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4618 (update_fee.as_ref(), commitment_signed)
4620 _ => panic!("Unexpected event"),
4622 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4624 // Generate (2) and (3):
4625 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4626 let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4627 check_added_monitors!(nodes[1], 1);
4630 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4631 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4632 check_added_monitors!(nodes[0], 1);
4634 // Create and deliver (4)...
4635 nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
4636 check_added_monitors!(nodes[0], 1);
4637 let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
4638 assert_eq!(events_0.len(), 1);
4639 let (update_msg, commitment_signed) = match events_0[0] {
4640 MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
4641 (update_fee.as_ref(), commitment_signed)
4643 _ => panic!("Unexpected event"),
4646 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
4647 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
4648 check_added_monitors!(nodes[1], 1);
4650 let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4651 // No commitment_signed so get_event_msg's assert(len == 1) passes
4653 // Handle (3), creating (6):
4654 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
4655 check_added_monitors!(nodes[0], 1);
4656 let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4657 // No commitment_signed so get_event_msg's assert(len == 1) passes
4660 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
4661 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4662 check_added_monitors!(nodes[0], 1);
4664 // Deliver (6), creating (7):
4665 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
4666 let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4667 assert!(commitment_update.update_add_htlcs.is_empty());
4668 assert!(commitment_update.update_fulfill_htlcs.is_empty());
4669 assert!(commitment_update.update_fail_htlcs.is_empty());
4670 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
4671 assert!(commitment_update.update_fee.is_none());
4672 check_added_monitors!(nodes[1], 1);
4675 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
4676 check_added_monitors!(nodes[0], 1);
4677 let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4678 // No commitment_signed so get_event_msg's assert(len == 1) passes
4680 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
4681 check_added_monitors!(nodes[1], 1);
4682 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4684 assert_eq!(get_feerate!(nodes[0]), feerate + 30);
4685 assert_eq!(get_feerate!(nodes[1]), feerate + 30);
4686 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
4690 fn pre_funding_lock_shutdown_test() {
4691 // Test sending a shutdown prior to funding_locked after funding generation
4692 let nodes = create_network(2);
4693 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0);
4694 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4695 nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
4696 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
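// The funding tx is confirmed but funding_locked was never exchanged; a cooperative close
// should still work: exchange shutdown both ways, then alternate closing_signed until one
// side has nothing more to send.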
4698 nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap();
4699 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4700 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4701 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4702 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4704 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4705 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4706 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4707 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4708 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4709 assert!(node_0_none.is_none());
4711 assert!(nodes[0].node.list_channels().is_empty());
4712 assert!(nodes[1].node.list_channels().is_empty());
4716 fn updates_shutdown_wait() {
4717 // Test sending a shutdown with outstanding updates pending
4718 let mut nodes = create_network(3);
4719 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4720 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4721 let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
4722 let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
4724 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
4726 nodes[0].node.close_channel(&chan_1.2).unwrap();
4727 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4728 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4729 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4730 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4732 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4733 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4735 let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
4736 if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {}
4737 else { panic!("New sends should fail!") };
4738 if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {}
4739 else { panic!("New sends should fail!") };
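// Once shutdown has been sent both ways no new HTLCs can be added, but the HTLC already in
// flight must still be fulfillable back along the route before closing completes: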
4741 assert!(nodes[2].node.claim_funds(our_payment_preimage));
4742 check_added_monitors!(nodes[2], 1);
4743 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4744 assert!(updates.update_add_htlcs.is_empty());
4745 assert!(updates.update_fail_htlcs.is_empty());
4746 assert!(updates.update_fail_malformed_htlcs.is_empty());
4747 assert!(updates.update_fee.is_none());
4748 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4749 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
4750 check_added_monitors!(nodes[1], 1);
4751 let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4752 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
4754 assert!(updates_2.update_add_htlcs.is_empty());
4755 assert!(updates_2.update_fail_htlcs.is_empty());
4756 assert!(updates_2.update_fail_malformed_htlcs.is_empty());
4757 assert!(updates_2.update_fee.is_none());
4758 assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
4759 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
4760 commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
4762 let events = nodes[0].node.get_and_clear_pending_events();
4763 assert_eq!(events.len(), 1);
4765 Event::PaymentSent { ref payment_preimage } => {
4766 assert_eq!(our_payment_preimage, *payment_preimage);
4768 _ => panic!("Unexpected event"),
4771 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4772 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4773 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4774 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4775 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4776 assert!(node_0_none.is_none());
4778 assert!(nodes[0].node.list_channels().is_empty());
4780 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
4781 nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
4782 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
4783 assert!(nodes[1].node.list_channels().is_empty());
4784 assert!(nodes[2].node.list_channels().is_empty());
4788 fn htlc_fail_async_shutdown() {
4789 // Test that HTLCs are failed back if a shutdown starts, even if messages are delivered out-of-order
4790 let mut nodes = create_network(3);
4791 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4792 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4794 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
4795 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4796 nodes[0].node.send_payment(route, our_payment_hash).unwrap();
4797 check_added_monitors!(nodes[0], 1);
4798 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4799 assert_eq!(updates.update_add_htlcs.len(), 1);
4800 assert!(updates.update_fulfill_htlcs.is_empty());
4801 assert!(updates.update_fail_htlcs.is_empty());
4802 assert!(updates.update_fail_malformed_htlcs.is_empty());
4803 assert!(updates.update_fee.is_none());
4805 nodes[1].node.close_channel(&chan_1.2).unwrap();
4806 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4807 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4808 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4810 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
4811 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
4812 check_added_monitors!(nodes[1], 1);
4813 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4814 commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
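// With shutdown in progress, nodes[1] fails the just-committed HTLC back to nodes[0] rather
// than forwarding it on toward nodes[2]: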
4816 let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4817 assert!(updates_2.update_add_htlcs.is_empty());
4818 assert!(updates_2.update_fulfill_htlcs.is_empty());
4819 assert_eq!(updates_2.update_fail_htlcs.len(), 1);
4820 assert!(updates_2.update_fail_malformed_htlcs.is_empty());
4821 assert!(updates_2.update_fee.is_none());
4823 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap();
4824 commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
4826 let events = nodes[0].node.get_and_clear_pending_events();
4827 assert_eq!(events.len(), 1);
4829 Event::PaymentFailed { ref payment_hash, ref rejected_by_dest } => {
4830 assert_eq!(our_payment_hash, *payment_hash);
4831 assert!(!rejected_by_dest);
4833 _ => panic!("Unexpected event"),
4836 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4837 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4838 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4839 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4840 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4841 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4842 assert!(node_0_none.is_none());
4844 assert!(nodes[0].node.list_channels().is_empty());
4846 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
4847 nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
4848 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
4849 assert!(nodes[1].node.list_channels().is_empty());
4850 assert!(nodes[2].node.list_channels().is_empty());
4854 fn update_fee_async_shutdown() {
4855 // Test that update_fee still works after shutdown has started, even when messages are delivered out-of-order
4856 let nodes = create_network(2);
4857 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4859 let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate();
4860 nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap();
4861 check_added_monitors!(nodes[0], 1);
4862 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4863 assert!(updates.update_add_htlcs.is_empty());
4864 assert!(updates.update_fulfill_htlcs.is_empty());
4865 assert!(updates.update_fail_htlcs.is_empty());
4866 assert!(updates.update_fail_malformed_htlcs.is_empty());
4867 assert!(updates.update_fee.is_some());
4869 nodes[1].node.close_channel(&chan_1.2).unwrap();
4870 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4871 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4872 // Note that we don't actually test normative behavior here. The spec indicates we could
4873 // actually send a closing_signed here, but it is kinda unclear and could possibly be amended
4874 // to require waiting on the full commitment dance before doing so (see
4875 // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid
4876 // ambiguity, we should wait until after the full commitment dance to send closing_signed.
4877 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4879 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap();
4880 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
4881 check_added_monitors!(nodes[1], 1);
4882 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4883 let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);
4885 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4886 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() {
4887 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
4888 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
4891 _ => panic!("Unexpected event"),
4893 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4894 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
4895 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
4896 assert!(node_0_none.is_none());
4899 fn do_test_shutdown_rebroadcast(recv_count: u8) {
4900 // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
4901 // messages delivered prior to disconnect
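// recv_count (0, 1, or 2) controls how many of the re-sent shutdown/closing_signed messages
// are actually delivered before each disconnect, exercising a different resumption point each
// time.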
4902 let nodes = create_network(3);
4903 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4904 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
4906 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
4908 nodes[1].node.close_channel(&chan_1.2).unwrap();
4909 let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4911 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
4912 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4914 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
4918 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4919 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4921 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
4922 let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
4923 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
4924 let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
4926 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap();
4927 let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4928 assert!(node_1_shutdown == node_1_2nd_shutdown);
4930 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap();
4931 let node_0_2nd_shutdown = if recv_count > 0 {
4932 let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4933 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
4936 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4937 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
4938 get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
4940 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap();
4942 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4943 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4945 assert!(nodes[2].node.claim_funds(our_payment_preimage));
4946 check_added_monitors!(nodes[2], 1);
4947 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
4948 assert!(updates.update_add_htlcs.is_empty());
4949 assert!(updates.update_fail_htlcs.is_empty());
4950 assert!(updates.update_fail_malformed_htlcs.is_empty());
4951 assert!(updates.update_fee.is_none());
4952 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
4953 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
4954 check_added_monitors!(nodes[1], 1);
4955 let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4956 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
4958 assert!(updates_2.update_add_htlcs.is_empty());
4959 assert!(updates_2.update_fail_htlcs.is_empty());
4960 assert!(updates_2.update_fail_malformed_htlcs.is_empty());
4961 assert!(updates_2.update_fee.is_none());
4962 assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
4963 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
4964 commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
4966 let events = nodes[0].node.get_and_clear_pending_events();
4967 assert_eq!(events.len(), 1);
4969 Event::PaymentSent { ref payment_preimage } => {
4970 assert_eq!(our_payment_preimage, *payment_preimage);
4972 _ => panic!("Unexpected event"),
4975 let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
4977 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
4978 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
4979 assert!(node_1_closing_signed.is_some());
4982 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4983 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4985 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
4986 let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
4987 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
4988 if recv_count == 0 {
4989 // If the closing_signed messages weren't delivered we can just resume where we left off...
4990 let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
4992 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
4993 let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
4994 assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
4996 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
4997 let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
4998 assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
5000 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
5001 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
5003 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
5004 let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
5005 assert!(node_0_closing_signed == node_0_2nd_closing_signed);
5007 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
5008 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
5009 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
5010 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
5011 assert!(node_0_none.is_none());
5013 // However, if one node received and responded with an identical closing_signed, we end
5014 // up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
5015 // There isn't really anything better we can simply do here, but in the future we might
5016 // explore storing a set of recently-closed channels that got disconnected during
5017 // closing_signed and avoiding broadcasting local commitment txn for some timeout to
5018 // give our counterparty enough time to (potentially) broadcast a cooperative close transaction.
5020 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
5022 if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) =
5023 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
5024 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
5025 let msgs::ErrorMessage {ref channel_id, ..} = msg;
5026 assert_eq!(*channel_id, chan_1.2);
5027 } else { panic!("Needed SendErrorMessage close"); }
5029 // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
5030 // checks it, but in this case nodes[0] didn't ever get a chance to receive a
5031 // closing_signed so we do it ourselves
5032 let events = nodes[0].node.get_and_clear_pending_msg_events();
5033 assert_eq!(events.len(), 1);
5035 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
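// flag bit 1 (mask 0x2) in channel_update marks the channel as disabled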
5036 assert_eq!(msg.contents.flags & 2, 2);
5038 _ => panic!("Unexpected event"),
5042 assert!(nodes[0].node.list_channels().is_empty());
5044 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
5045 nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
5046 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
5047 assert!(nodes[1].node.list_channels().is_empty());
5048 assert!(nodes[2].node.list_channels().is_empty());
5052 fn test_shutdown_rebroadcast() {
5053 do_test_shutdown_rebroadcast(0);
5054 do_test_shutdown_rebroadcast(1);
5055 do_test_shutdown_rebroadcast(2);
5059 fn fake_network_test() {
5060 // Simple test which builds a network of ChannelManagers, connects them to each other, and
5061 // tests that payments get routed and transactions broadcast in semi-reasonable ways.
5062 let nodes = create_network(4);
5064 // Create some initial channels
5065 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
5066 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
5067 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
5069 // Rebalance the network a bit by relaying one payment through all the channels...
5070 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5071 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5072 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5073 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
5075 // Send some more payments
5076 send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
5077 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
5078 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
5080 // Test failure packets
5081 let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
5082 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
5084 // Add a new channel between nodes[1] and nodes[3], skipping nodes[2]
5085 let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
5087 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
5088 send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
5089 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5090 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5091 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5092 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5093 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
5095 // Do some rebalance loop payments, simultaneously
5096 let mut hops = Vec::with_capacity(3);
5097 hops.push(RouteHop {
5098 pubkey: nodes[2].node.get_our_node_id(),
5099 short_channel_id: chan_2.0.contents.short_channel_id,
5101 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
5103 hops.push(RouteHop {
5104 pubkey: nodes[3].node.get_our_node_id(),
5105 short_channel_id: chan_3.0.contents.short_channel_id,
5107 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
5109 hops.push(RouteHop {
5110 pubkey: nodes[1].node.get_our_node_id(),
5111 short_channel_id: chan_4.0.contents.short_channel_id,
5113 cltv_expiry_delta: TEST_FINAL_CLTV,
5115 hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
5116 hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
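// Hop fees are filled in back-to-front: each hop charges fee_base_msat plus
// fee_proportional_millionths of the amount forwarded by the hop after it, per million.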
5117 let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
5119 let mut hops = Vec::with_capacity(3);
5120 hops.push(RouteHop {
5121 pubkey: nodes[3].node.get_our_node_id(),
5122 short_channel_id: chan_4.0.contents.short_channel_id,
5124 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
5126 hops.push(RouteHop {
5127 pubkey: nodes[2].node.get_our_node_id(),
5128 short_channel_id: chan_3.0.contents.short_channel_id,
5130 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
5132 hops.push(RouteHop {
5133 pubkey: nodes[1].node.get_our_node_id(),
5134 short_channel_id: chan_2.0.contents.short_channel_id,
5136 cltv_expiry_delta: TEST_FINAL_CLTV,
5138 hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
5139 hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
5140 let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
5142 // Claim the rebalances...
5143 fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
5144 claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
5146 // Add a second channel between nodes[1] and nodes[3], duplicating chan_4
5147 let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
5149 // Send some payments across both channels
5150 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
5151 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
5152 let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
5154 route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
5156 //TODO: Test that routes work again here as we've been notified that the channel is full
5158 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
5159 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
5160 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
5162 // Close down the channels...
5163 close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
5164 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
5165 close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
5166 close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
5167 close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
5171 fn duplicate_htlc_test() {
5172 // Test that we accept duplicate payment_hash HTLCs across the network and that
5173 // claiming/failing them are all separate and don't affect each other
5174 let mut nodes = create_network(6);
5176 // Create some initial channels to route via 3 to 4/5 from 0/1/2
5177 create_announced_chan_between_nodes(&nodes, 0, 3);
5178 create_announced_chan_between_nodes(&nodes, 1, 3);
5179 create_announced_chan_between_nodes(&nodes, 2, 3);
5180 create_announced_chan_between_nodes(&nodes, 3, 4);
5181 create_announced_chan_between_nodes(&nodes, 3, 5);
5183 let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
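// Wind the shared payment counter back so the next route_payment produces the same
// preimage/hash, creating duplicate-payment_hash HTLCs over independent paths.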
5185 *nodes[0].network_payment_count.borrow_mut() -= 1;
5186 assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
5188 *nodes[0].network_payment_count.borrow_mut() -= 1;
5189 assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
5191 claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
5192 fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
5193 claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
5196 #[derive(PartialEq)]
5197 enum HTLCType { NONE, TIMEOUT, SUCCESS }
5198 /// Tests that the given node has broadcast transactions for the given Channel
5200 /// First checks that the latest local commitment tx has been broadcast, unless an explicit
5201 /// commitment_tx is provided, which may be used to test that a remote commitment tx was
5202 /// broadcast and the revoked outputs were claimed.
5204 /// Next tests that there is (or is not) a transaction that spends the commitment transaction
5205 /// that appears to be the type of HTLC transaction specified in has_htlc_tx.
5207 /// All broadcast transactions must be accounted for in one of the above three types or we'll fail the test.
5209 fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
5210 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
5211 assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
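// i.e. we expect the local commitment tx (unless the caller provided one) plus, if requested,
// a transaction spending it for the HTLC; everything broadcast must be claimed by the checks
// below or the final assert!(node_txn.is_empty()) fails.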
5213 let mut res = Vec::with_capacity(2);
5214 node_txn.retain(|tx| {
5215 if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
5216 check_spends!(tx, chan.3.clone());
5217 if commitment_tx.is_none() {
5218 res.push(tx.clone());
5223 if let Some(explicit_tx) = commitment_tx {
5224 res.push(explicit_tx.clone());
5227 assert_eq!(res.len(), 1);
5229 if has_htlc_tx != HTLCType::NONE {
5230 node_txn.retain(|tx| {
5231 if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
5232 check_spends!(tx, res[0].clone());
5233 if has_htlc_tx == HTLCType::TIMEOUT {
5234 assert!(tx.lock_time != 0);
5236 assert!(tx.lock_time == 0);
5238 res.push(tx.clone());
5242 assert_eq!(res.len(), 2);
5245 assert!(node_txn.is_empty());
5249 /// Tests that the given node has broadcast a claim transaction against the provided revoked
5250 /// HTLC transaction.
5251 fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
5252 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
5253 assert_eq!(node_txn.len(), 1);
5254 node_txn.retain(|tx| {
5255 if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
5256 check_spends!(tx, revoked_tx.clone());
5260 assert!(node_txn.is_empty());
fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();

	assert!(node_txn.len() >= 1);
	assert_eq!(node_txn[0].input.len(), 1);
	let mut found_prev = false;

	for tx in prev_txn {
		if node_txn[0].input[0].previous_output.txid == tx.txid() {
			check_spends!(node_txn[0], tx.clone());
			assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
			assert_eq!(tx.input.len(), 1); // must spend a commitment tx

			found_prev = true;
			break;
		}
	}
	assert!(found_prev);

	let mut res = Vec::new();
	mem::swap(&mut *node_txn, &mut res);
	res
}
fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
	let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 1);
	let as_update = match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};

	let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let bs_update = match events_2[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};

	for node in nodes {
		node.router.handle_channel_update(&as_update).unwrap();
		node.router.handle_channel_update(&bs_update).unwrap();
	}
}
macro_rules! expect_pending_htlcs_forwardable {
	($node: expr) => {{
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		};
		$node.node.channel_state.lock().unwrap().next_forward = Instant::now();
		$node.node.process_pending_htlc_forwards();
	}}
}
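// Setting next_forward to Instant::now() above skips the short batching delay
// ChannelManager normally waits out before forwarding HTLCs, letting tests call
// process_pending_htlc_forwards() immediately.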
#[test]
fn channel_reserve_test() {
	use std::sync::atomic::Ordering;
	use ln::msgs::HandleError;

	macro_rules! get_channel_value_stat {
		($node: expr, $channel_id: expr) => {{
			let chan_lock = $node.node.channel_state.lock().unwrap();
			let chan = chan_lock.by_id.get(&$channel_id).unwrap();
			chan.get_value_stat()
		}}
	}

	let mut nodes = create_network(3);
	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);

	let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
	let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);

	let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
	let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
	macro_rules! get_route_and_payment_hash {
		($recv_value: expr) => {{
			let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
			let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
			(route, payment_hash, payment_preimage)
		}}
	}

	macro_rules! expect_forward {
		($node: expr) => {{
			let mut events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			check_added_monitors!($node, 1);
			let payment_event = SendEvent::from_event(events.remove(0));
			payment_event
		}}
	}

	macro_rules! expect_payment_received {
		($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
			let events = $node.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				Event::PaymentReceived { ref payment_hash, amt } => {
					assert_eq!($expected_payment_hash, *payment_hash);
					assert_eq!($expected_recv_value, amt);
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
	let feemsat = 239; // somehow we know?
	let total_fee_msat = (nodes.len() - 2) as u64 * 239;

	let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
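	// Only the intermediate hops charge fees here (nodes.len() - 2 of them on this route),
	// so the most the recipient can be sent under the in-flight cap is the cap minus fees.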
	// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
	{
		let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
		assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
		let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
		match err {
			APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
			_ => panic!("Unknown error variants"),
		}
	}
	let mut htlc_id = 0;
	// channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
	// nodes[0]'s wealth
	loop {
		let amt_msat = recv_value_0 + total_fee_msat;
		if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
			break;
		}
		send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
		htlc_id += 1;

		let (stat01_, stat11_, stat12_, stat22_) = (
			get_channel_value_stat!(nodes[0], chan_1.2),
			get_channel_value_stat!(nodes[1], chan_1.2),
			get_channel_value_stat!(nodes[1], chan_2.2),
			get_channel_value_stat!(nodes[2], chan_2.2),
		);

		assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
		assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
		assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
		assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
		stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
	}
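	// Each pass of the loop moved recv_value_0 plus fees out of nodes[0]; nodes[1] netted
	// exactly feemsat per payment and nodes[2] received recv_value_0, which is what the
	// four stat asserts inside the loop verify.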
	{
		let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
		// attempt to get channel_reserve violation
		let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
		let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
		match err {
			APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
			_ => panic!("Unknown error variants"),
		}
	}
	// adding pending output
	let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
	let amt_msat_1 = recv_value_1 + total_fee_msat;

	let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
	let payment_event_1 = {
		nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
	// channel reserve test with htlc pending output > 0
	let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
	{
		let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
		match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
			APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
			_ => panic!("Unknown error variants"),
		}
	}
	{
		// test channel_reserve test on nodes[1] side
		let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);

		// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
		let secp_ctx = Secp256k1::new();
		let session_priv = SecretKey::from_slice(&secp_ctx, &{
			let mut session_key = [0; 32];
			rng::fill_bytes(&mut session_key);
			session_key
		}).expect("RNG is bad!");

		let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
		let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
		let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
		let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
		let msg = msgs::UpdateAddHTLC {
			channel_id: chan_1.2,
			htlc_id: htlc_id + 1, // the next unused HTLC ID (payment_event_1 used htlc_id)
			amount_msat: htlc_msat,
			payment_hash: our_payment_hash,
			cltv_expiry: htlc_cltv,
			onion_routing_packet: onion_packet,
		};

		let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
		match err {
			HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
		}
	}
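	// The manual construction above mirrors what send_payment() does internally: derive
	// per-hop keys from an ephemeral session key (construct_onion_keys), build the per-hop
	// payloads while accumulating fees and CLTV deltas (build_onion_payloads), then wrap
	// everything into the fixed-size onion (construct_onion_packet).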
	// split the rest to test holding cell
	let recv_value_21 = recv_value_2/2;
	let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
	{
		let stat = get_channel_value_stat!(nodes[0], chan_1.2);
		assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
	}
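	// i.e. once the already-pending HTLC and both remaining payments (plus a fee each) are
	// subtracted, nodes[0] sits exactly at its reserve floor.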
	// now see if they go through on both sides
	let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
	// but this one will get stuck in the holding cell
	nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
	check_added_monitors!(nodes[0], 0);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 0);
	// test with outbound holding cell amount > 0
	{
		let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
		match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
			APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
			_ => panic!("Unknown error variants"),
		}
	}
	let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
	// this one will also get stuck in the holding cell
	nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	// flush the pending htlc
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
	let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
	check_added_monitors!(nodes[0], 1);
	let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed).unwrap();
	let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
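	// Handling nodes[1]'s revoke_and_ack freed nodes[0]'s holding cell, so
	// commitment_update_2 carries the two held HTLC adds (asserted below).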
	expect_pending_htlcs_forwardable!(nodes[1]);

	let ref payment_event_11 = expect_forward!(nodes[1]);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
	commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[2]);
	expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);

	// flush the htlcs in the holding cell
	assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
	commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let ref payment_event_3 = expect_forward!(nodes[1]);
	assert_eq!(payment_event_3.msgs.len(), 2);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();

	commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[2]);
	let events = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentReceived { ref payment_hash, amt } => {
			assert_eq!(our_payment_hash_21, *payment_hash);
			assert_eq!(recv_value_21, amt);
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentReceived { ref payment_hash, amt } => {
			assert_eq!(our_payment_hash_22, *payment_hash);
			assert_eq!(recv_value_22, amt);
		},
		_ => panic!("Unexpected event"),
	}
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);

	let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
	let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
	assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
	assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);

	let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
	assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
}
#[test]
fn channel_monitor_network_test() {
	// Simple test which builds a network of ChannelManagers, connects them to each other, and
	// tests that ChannelMonitor is able to recover from various states.
	let nodes = create_network(5);

	// Create some initial channels
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
	let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
	let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);

	// Rebalance the network a bit by relaying one payment through all the channels...
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
	// Simple case with no pending HTLCs:
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
	{
		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
		test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	// One pending HTLC is discarded by the force-close:
	let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;

	// Simple case of one pending HTLC to HTLC-Timeout
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
	{
		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
		test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
	}
	get_announce_close_broadcast_events(&nodes, 1, 2);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
	assert_eq!(nodes[2].node.list_channels().len(), 1);
	macro_rules! claim_funds {
		($node: expr, $prev_node: expr, $preimage: expr) => {
			{
				assert!($node.node.claim_funds($preimage));
				check_added_monitors!($node, 1);

				let events = $node.node.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				match events[0] {
					MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
						assert!(update_add_htlcs.is_empty());
						assert!(update_fail_htlcs.is_empty());
						assert_eq!(*node_id, $prev_node.node.get_our_node_id());
					},
					_ => panic!("Unexpected event"),
				};
			}
		}
	}
	// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
	// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
	{
		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);

		// Claim the payment on nodes[3], giving it knowledge of the preimage
		claim_funds!(nodes[3], nodes[2], payment_preimage_1);

		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);

		check_preimage_claim(&nodes[3], &node_txn);
	}
	get_announce_close_broadcast_events(&nodes, 2, 3);
	assert_eq!(nodes[2].node.list_channels().len(), 0);
	assert_eq!(nodes[3].node.list_channels().len(), 1);
	{ // Cheat and reset nodes[4]'s height to 1
		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
	}

	assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
	assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
	// One pending HTLC to time out:
	let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
	// CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
	// buffer)
	{
		let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
		for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
			header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
		}

		let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);

		// Claim the payment on nodes[4], giving it knowledge of the preimage
		claim_funds!(nodes[4], nodes[3], payment_preimage_2);

		header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
		for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
			header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
			nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
		}

		test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
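		// nodes[4] holds the preimage, so once its height gets within CLTV_CLAIM_BUFFER
		// blocks of the HTLC's expiry its ChannelMonitor goes to chain with an
		// HTLC-Success claim rather than letting the HTLC time out.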
		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);

		check_preimage_claim(&nodes[4], &node_txn);
	}
	get_announce_close_broadcast_events(&nodes, 3, 4);
	assert_eq!(nodes[3].node.list_channels().len(), 0);
	assert_eq!(nodes[4].node.list_channels().len(), 0);
}
#[test]
fn test_justice_tx() {
	// Test justice txn built on revoked HTLC-Timeout and HTLC-Success txs, against both sides

	let nodes = create_network(2);
	// Create some new channels:
	let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// A pending HTLC which will be revoked:
	let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	// Get the will-be-revoked local txn from nodes[0]
	let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
	assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
	assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
	assert_eq!(revoked_local_txn[1].input.len(), 1);
	assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
	assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
	// Revoke the old state
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);

	{
		let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
		{
			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
			assert_eq!(node_txn.len(), 3);
			assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
			assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output

			check_spends!(node_txn[0], revoked_local_txn[0].clone());
			node_txn.swap_remove(0);
		}
		test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);

		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
		let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
		test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
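	// So far: nodes[1] swept both the to_local and HTLC outputs of the revoked commitment,
	// and when nodes[0]'s HTLC-Timeout spend of that commitment confirmed anyway, nodes[1]
	// built a second justice tx against the HTLC-Timeout's output.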
	// We test the justice tx built by A on B's revoked HTLC-Success tx
	// Create some new channels:
	let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// A pending HTLC which will be revoked:
	let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	// Get the will-be-revoked local txn from B
	let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
	assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
	assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
	// Revoke the old state
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);

	{
		let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
		{
			let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
			assert_eq!(node_txn.len(), 3);
			assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
			assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output

			check_spends!(node_txn[0], revoked_local_txn[0].clone());
			node_txn.swap_remove(0);
		}
		test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);

		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
		let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
		test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone());
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
fn revoked_output_claim() {
	// Simple test to ensure a node will claim a revoked output when a stale remote commitment
	// transaction is broadcast by its counterparty
	let nodes = create_network(2);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	// nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
	let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
	assert_eq!(revoked_local_txn.len(), 1);
	// Only output is the full channel value back to nodes[0]:
	assert_eq!(revoked_local_txn[0].output.len(), 1);
	// Send a payment through, updating everyone's latest commitment txn
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);

	// Inform nodes[1] that nodes[0] broadcast a stale tx
	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
	nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast the justice tx twice, and its own local state once

	assert_eq!(node_txn[0], node_txn[2]);

	check_spends!(node_txn[0], revoked_local_txn[0].clone());
	check_spends!(node_txn[1], chan_1.3.clone());

	// Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
	nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
	get_announce_close_broadcast_events(&nodes, 0, 1);
}
#[test]
fn claim_htlc_outputs_shared_tx() {
	// Node revoked old state, HTLCs haven't timed out yet, claim them in a shared justice tx
	let nodes = create_network(2);

	// Create some new channel:
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Rebalance the network to generate htlcs in the two directions
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
	// nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;

	// Get the will-be-revoked local txn from node[0]
	let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
	assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
	assert_eq!(revoked_local_txn[0].input.len(), 1);
	assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
	assert_eq!(revoked_local_txn[1].input.len(), 1);
	assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
	assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
	check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());

	// Revoke the old state
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);

	{
		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };

		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);

		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 4);

		assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
		check_spends!(node_txn[0], revoked_local_txn[0].clone());

		assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning

		let mut witness_lens = BTreeSet::new();
		witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
		witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
		witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
		assert_eq!(witness_lens.len(), 3);
		assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
		assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
		assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
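		// The last witness element is the witness script itself; since the three script
		// sizes are distinct, collecting their lengths in a BTreeSet is a convenient way
		// to check that one input of each output type is being swept.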
		// Next nodes[1] broadcasts its current local tx state:
		assert_eq!(node_txn[1].input.len(), 1);
		assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique output; tx broadcast by ChannelManager

		assert_eq!(node_txn[2].input.len(), 1);
		let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
		assert_eq!(witness_script.len(), 133); // Spending an offered htlc output
		assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
		assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
		assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
fn claim_htlc_outputs_single_tx() {
	// Node revoked old state, HTLCs have timed out, claim each of them in a separate justice tx
	let nodes = create_network(2);

	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Rebalance the network to generate htlcs in the two directions
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
	// nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
	// time as two different claim transactions, as we're going to time out the HTLCs given a high current height
	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
	let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;

	// Get the will-be-revoked local txn from node[0]
	let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();

	// Revoke the old state
	claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);

	{
		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };

		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);

		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 12); // ChannelManager: 2 (local commitment tx + HTLC-Timeout tx), ChannelMonitor: 10 = (1 revoked-output claim + 2 revoked-HTLC claims + 1 local commitment tx + 1 HTLC-Timeout tx) * 2 (block re-scanning)

		assert_eq!(node_txn[0], node_txn[7]);
		assert_eq!(node_txn[1], node_txn[8]);
		assert_eq!(node_txn[2], node_txn[9]);
		assert_eq!(node_txn[3], node_txn[10]);
		assert_eq!(node_txn[4], node_txn[11]);
		assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + HTLC-Timeout tx broadcast by ChannelManager
		assert_eq!(node_txn[4], node_txn[6]);

		assert_eq!(node_txn[0].input.len(), 1);
		assert_eq!(node_txn[1].input.len(), 1);
		assert_eq!(node_txn[2].input.len(), 1);

		let mut revoked_tx_map = HashMap::new();
		revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
		node_txn[0].verify(&revoked_tx_map).unwrap();
		node_txn[1].verify(&revoked_tx_map).unwrap();
		node_txn[2].verify(&revoked_tx_map).unwrap();
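		// verify() script-checks each input against the output it spends in the provided
		// txid map, proving all three claims validly spend the revoked commitment tx.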
		let mut witness_lens = BTreeSet::new();
		witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
		witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
		witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
		assert_eq!(witness_lens.len(), 3);
		assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
		assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
		assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC

		assert_eq!(node_txn[3].input.len(), 1);
		check_spends!(node_txn[3], chan_1.3.clone());

		assert_eq!(node_txn[4].input.len(), 1);
		let witness_script = node_txn[4].input[0].witness.last().unwrap();
		assert_eq!(witness_script.len(), 133); // Spending an offered htlc output
		assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
		assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
		assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
	}
	get_announce_close_broadcast_events(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_channels().len(), 0);
	assert_eq!(nodes[1].node.list_channels().len(), 0);
}
#[test]
fn test_htlc_ignore_latest_remote_commitment() {
	// Test that HTLC transactions spending the latest remote commitment transaction are simply
	// ignored if we cannot claim them. This originally tickled an invalid unwrap().
	let nodes = create_network(2);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	route_payment(&nodes[0], &[&nodes[1]], 10000000);
	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
	{
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
				assert_eq!(flags & 0b10, 0b10);
			},
			_ => panic!("Unexpected event"),
		}
	}
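	// Bit 1 of the channel_update flags is the "disable" bit (BOLT 7), so the masked check
	// above asserts the broadcast update marks the channel disabled after the force-close.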
	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert_eq!(node_txn.len(), 2);

	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
	nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);

	{
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
				assert_eq!(flags & 0b10, 0b10);
			},
			_ => panic!("Unexpected event"),
		}
	}

	// Duplicate the block_connected call since this may happen due to other listeners
	// registering new transactions
	nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
}
#[test]
fn test_force_close_fail_back() {
	// Check which HTLCs are failed-backwards on channel force-closure
	let mut nodes = create_network(3);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();

	let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);

	let mut payment_event = {
		nodes[0].node.send_payment(route, our_payment_hash).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	let events_1 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_1.len(), 1);
	match events_1[0] {
		Event::PendingHTLCsForwardable { .. } => { },
		_ => panic!("Unexpected event"),
	};

	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
	nodes[1].node.process_pending_htlc_forwards();

	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	payment_event = SendEvent::from_event(events_2.remove(0));
	assert_eq!(payment_event.msgs.len(), 1);

	check_added_monitors!(nodes[1], 1);
	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
	check_added_monitors!(nodes[2], 1);
	let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	// nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
	// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
	// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).

	nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
			assert_eq!(flags & 0b10, 0b10);
		},
		_ => panic!("Unexpected event"),
	}

	let tx = {
		let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
		// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
		// have a use for it unless nodes[2] learns the preimage somehow, the funds will go
		// back to nodes[1] upon timeout otherwise.
		assert_eq!(node_txn.len(), 1);
		node_txn.remove(0)
	};

	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
	nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);

	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
	// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
	assert_eq!(events_4.len(), 1);
	match events_4[0] {
		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
			assert_eq!(flags & 0b10, 0b10);
		},
		_ => panic!("Unexpected event"),
	}
	// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
	{
		let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
		monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
			.provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
	}
	nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
	let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert_eq!(node_txn.len(), 1);
	assert_eq!(node_txn[0].input.len(), 1);
	assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
	assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
	assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
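	// (Per BOLT 3, an HTLC-Success spend of a commitment tx carries the witness
	// `0 <remote_sig> <local_sig> <payment_preimage> <witness_script>`: exactly five
	// elements, with lock_time 0, unlike HTLC-Timeout which locks to the CLTV expiry.)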
	check_spends!(node_txn[0], tx);
}
#[test]
fn test_unconf_chan() {
	// After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side
	let nodes = create_network(2);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let channel_state = nodes[0].node.channel_state.lock().unwrap();
	assert_eq!(channel_state.by_id.len(), 1);
	assert_eq!(channel_state.short_to_id.len(), 1);
	mem::drop(channel_state);

	let mut headers = Vec::new();
	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
	headers.push(header.clone());
	for _i in 2..100 {
		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		headers.push(header.clone());
	}
	while !headers.is_empty() {
		nodes[0].node.block_disconnected(&headers.pop().unwrap());
	}

	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
			assert_eq!(flags & 0b10, 0b10);
		},
		_ => panic!("Unexpected event"),
	}

	let channel_state = nodes[0].node.channel_state.lock().unwrap();
	assert_eq!(channel_state.by_id.len(), 0);
	assert_eq!(channel_state.short_to_id.len(), 0);
}
macro_rules! get_chan_reestablish_msgs {
	($src_node: expr, $dst_node: expr) => {
		{
			let mut res = Vec::with_capacity(1);
			for msg in $src_node.node.get_and_clear_pending_msg_events() {
				if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
					assert_eq!(*node_id, $dst_node.node.get_our_node_id());
					res.push(msg.clone());
				} else {
					panic!("Unexpected event")
				}
			}
			res
		}
	}
}
macro_rules! handle_chan_reestablish_msgs {
	($src_node: expr, $dst_node: expr) => {
		{
			let msg_events = $src_node.node.get_and_clear_pending_msg_events();
			let mut idx = 0;
			let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
				idx += 1;
				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
				Some(msg.clone())
			} else {
				None
			};

			let mut revoke_and_ack = None;
			let mut commitment_update = None;
			let order = if let Some(ev) = msg_events.get(idx) {
				idx += 1;
				match ev {
					&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						revoke_and_ack = Some(msg.clone());
						RAACommitmentOrder::RevokeAndACKFirst
					},
					&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						commitment_update = Some(updates.clone());
						RAACommitmentOrder::CommitmentFirst
					},
					_ => panic!("Unexpected event"),
				}
			} else {
				RAACommitmentOrder::CommitmentFirst
			};

			if let Some(ev) = msg_events.get(idx) {
				match ev {
					&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						assert!(revoke_and_ack.is_none());
						revoke_and_ack = Some(msg.clone());
					},
					&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						assert!(commitment_update.is_none());
						commitment_update = Some(updates.clone());
					},
					_ => panic!("Unexpected event"),
				}
			}

			(funding_locked, revoke_and_ack, commitment_update, order)
		}
	}
}
/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
/// for claims/fails they are separated out.
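///
/// For each (node_a, node_b) tuple argument: send_funding_locked says whether to expect a
/// re-sent funding_locked, pending_htlc_adds counts expected re-sent update_add_htlcs
/// (-1 meaning only a bare response commitment_signed is expected), pending_htlc_claims,
/// pending_cell_htlc_claims and pending_cell_htlc_fails count the expected fulfills and
/// fails, and pending_raa says whether a revoke_and_ack is still owed.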
fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
	node_a.node.peer_connected(&node_b.node.get_our_node_id());
	let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
	node_b.node.peer_connected(&node_a.node.get_our_node_id());
	let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);

	let mut resp_1 = Vec::new();
	for msg in reestablish_1 {
		node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap();
		resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
	}
	if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
		check_added_monitors!(node_b, 1);
	} else {
		check_added_monitors!(node_b, 0);
	}

	let mut resp_2 = Vec::new();
	for msg in reestablish_2 {
		node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap();
		resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
	}
	if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
		check_added_monitors!(node_a, 1);
	} else {
		check_added_monitors!(node_a, 0);
	}

	// We don't yet support both needing updates, as that would require a different commitment dance:
	assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
			(pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
	for chan_msgs in resp_1.drain(..) {
		if send_funding_locked.0 {
			node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
			let announcement_event = node_a.node.get_and_clear_pending_msg_events();
			if !announcement_event.is_empty() {
				assert_eq!(announcement_event.len(), 1);
				if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
					//TODO: Test announcement_sigs re-sending
				} else { panic!("Unexpected event!"); }
			}
		} else {
			assert!(chan_msgs.0.is_none());
		}
		if pending_raa.0 {
			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
			node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
			assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(node_a, 1);
		} else {
			assert!(chan_msgs.1.is_none());
		}
		if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
			let commitment_update = chan_msgs.2.unwrap();
			if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
			} else {
				assert!(commitment_update.update_add_htlcs.is_empty());
			}
			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
			assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
			for update_add in commitment_update.update_add_htlcs {
				node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
			}
			for update_fulfill in commitment_update.update_fulfill_htlcs {
				node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
			}
			for update_fail in commitment_update.update_fail_htlcs {
				node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
			}

			if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
				commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
			} else {
				node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
				check_added_monitors!(node_a, 1);
				let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id());
				// No commitment_signed so get_event_msg's assert(len == 1) passes
				node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
				assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(node_b, 1);
			}
		} else {
			assert!(chan_msgs.2.is_none());
		}
	}
	for chan_msgs in resp_2.drain(..) {
		if send_funding_locked.1 {
			node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
			let announcement_event = node_b.node.get_and_clear_pending_msg_events();
			if !announcement_event.is_empty() {
				assert_eq!(announcement_event.len(), 1);
				if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
					//TODO: Test announcement_sigs re-sending
				} else { panic!("Unexpected event!"); }
			}
		} else {
			assert!(chan_msgs.0.is_none());
		}
		if pending_raa.1 {
			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
			node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
			assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(node_b, 1);
		} else {
			assert!(chan_msgs.1.is_none());
		}
		if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
			let commitment_update = chan_msgs.2.unwrap();
			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
			} else {
				assert!(commitment_update.update_add_htlcs.is_empty());
			}
			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
			assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
			for update_add in commitment_update.update_add_htlcs {
				node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
			}
			for update_fulfill in commitment_update.update_fulfill_htlcs {
				node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
			}
			for update_fail in commitment_update.update_fail_htlcs {
				node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
			}

			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
				commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
			} else {
				node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
				check_added_monitors!(node_b, 1);
				let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id());
				// No commitment_signed so get_event_msg's assert(len == 1) passes
				node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
				assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(node_a, 1);
			}
		} else {
			assert!(chan_msgs.2.is_none());
		}
	}
}
#[test]
fn test_simple_peer_disconnect() {
	// Test that we can reconnect when there are no lost messages
	let nodes = create_network(3);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
	let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
	let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
	let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
	let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
	fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);

	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
	{
		let events = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events.len(), 2);
		match events[0] {
			Event::PaymentSent { payment_preimage } => {
				assert_eq!(payment_preimage, payment_preimage_3);
			},
			_ => panic!("Unexpected event"),
		}
		match events[1] {
			Event::PaymentFailed { payment_hash, rejected_by_dest } => {
				assert_eq!(payment_hash, payment_hash_5);
				assert!(rejected_by_dest);
			},
			_ => panic!("Unexpected event"),
		}
	}

	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
	fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
}
fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
	// Test that we can reconnect when in-flight HTLC updates get dropped
	let mut nodes = create_network(2);
	if messages_delivered == 0 {
		create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
		// nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
	} else {
		create_announced_chan_between_nodes(&nodes, 0, 1);
	}

	let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);

	let payment_event = {
		nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
		check_added_monitors!(nodes[0], 1);

		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);

	if messages_delivered < 2 {
		// Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
	} else {
		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
		if messages_delivered >= 3 {
			nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
			check_added_monitors!(nodes[1], 1);
			let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

			if messages_delivered >= 4 {
				nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(nodes[0], 1);

				if messages_delivered >= 5 {
					nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
					let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
					// No commitment_signed so get_event_msg's assert(len == 1) passes
					check_added_monitors!(nodes[0], 1);

					if messages_delivered >= 6 {
						nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
						assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
						check_added_monitors!(nodes[1], 1);
					}
				}
			}
		}
	}
6474 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6475 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6476 if messages_delivered < 3 {
6477 // Even if the funding_locked messages get exchanged, as long as nothing further was
6478 // received on either side, both sides will need to resend them.
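// Added note on the reconnect_nodes arguments (per the helper's definition earlier in this
// file, roughly): (send_funding_locked a/b), (pending_htlc_adds a/b), (pending_htlc_claims),
// (pending_cell_htlc_claims), (pending_cell_htlc_fails), (pending_raa a/b); a -1 add count
// appears to mark a pending commitment update that carries no new HTLC add.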
6479 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
6480 } else if messages_delivered == 3 {
6481 // nodes[0] still wants its RAA + commitment_signed
6482 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
6483 } else if messages_delivered == 4 {
6484 // nodes[0] still wants its commitment_signed
6485 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
6486 } else if messages_delivered == 5 {
6487 // nodes[1] still wants its final RAA
6488 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
6489 } else if messages_delivered == 6 {
6490 // Everything was delivered...
6491 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6492 }
6494 let events_1 = nodes[1].node.get_and_clear_pending_events();
6495 assert_eq!(events_1.len(), 1);
6496 match events_1[0] {
6497 Event::PendingHTLCsForwardable { .. } => { },
6498 _ => panic!("Unexpected event"),
6499 }
6501 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6502 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6503 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6505 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
6506 nodes[1].node.process_pending_htlc_forwards();
6508 let events_2 = nodes[1].node.get_and_clear_pending_events();
6509 assert_eq!(events_2.len(), 1);
6510 match events_2[0] {
6511 Event::PaymentReceived { ref payment_hash, amt } => {
6512 assert_eq!(payment_hash_1, *payment_hash);
6513 assert_eq!(amt, 1000000);
6514 },
6515 _ => panic!("Unexpected event"),
6516 }
6518 nodes[1].node.claim_funds(payment_preimage_1);
6519 check_added_monitors!(nodes[1], 1);
6521 let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
6522 assert_eq!(events_3.len(), 1);
6523 let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
6524 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
6525 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6526 assert!(updates.update_add_htlcs.is_empty());
6527 assert!(updates.update_fail_htlcs.is_empty());
6528 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
6529 assert!(updates.update_fail_malformed_htlcs.is_empty());
6530 assert!(updates.update_fee.is_none());
6531 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
6532 },
6533 _ => panic!("Unexpected event"),
6534 };
6536 if messages_delivered >= 1 {
6537 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
6539 let events_4 = nodes[0].node.get_and_clear_pending_events();
6540 assert_eq!(events_4.len(), 1);
6541 match events_4[0] {
6542 Event::PaymentSent { ref payment_preimage } => {
6543 assert_eq!(payment_preimage_1, *payment_preimage);
6544 },
6545 _ => panic!("Unexpected event"),
6546 }
6548 if messages_delivered >= 2 {
6549 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
6550 check_added_monitors!(nodes[0], 1);
6551 let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6553 if messages_delivered >= 3 {
6554 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
6555 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
6556 check_added_monitors!(nodes[1], 1);
6558 if messages_delivered >= 4 {
6559 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap();
6560 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6561 // No commitment_signed so get_event_msg's assert(len == 1) passes
6562 check_added_monitors!(nodes[1], 1);
6564 if messages_delivered >= 5 {
6565 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6566 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6567 check_added_monitors!(nodes[0], 1);
6568 }
6569 }
6570 }
6571 }
6572 }
6574 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6575 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6576 if messages_delivered < 2 {
6577 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
6578 //TODO: Deduplicate PaymentSent events, then enable this if:
6579 //if messages_delivered < 1 {
6580 let events_4 = nodes[0].node.get_and_clear_pending_events();
6581 assert_eq!(events_4.len(), 1);
6582 match events_4[0] {
6583 Event::PaymentSent { ref payment_preimage } => {
6584 assert_eq!(payment_preimage_1, *payment_preimage);
6585 },
6586 _ => panic!("Unexpected event"),
6587 }
6588 //}
6589 } else if messages_delivered == 2 {
6590 // nodes[0] still wants its RAA + commitment_signed
6591 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
6592 } else if messages_delivered == 3 {
6593 // nodes[0] still wants its commitment_signed
6594 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
6595 } else if messages_delivered == 4 {
6596 // nodes[1] still wants its final RAA
6597 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
6598 } else if messages_delivered == 5 {
6599 // Everything was delivered...
6600 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6601 }
6603 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6604 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6605 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6607 // Channel should still work fine...
6608 let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
6609 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
6610 }
6612 #[test]
6613 fn test_drop_messages_peer_disconnect_a() {
6614 do_test_drop_messages_peer_disconnect(0);
6615 do_test_drop_messages_peer_disconnect(1);
6616 do_test_drop_messages_peer_disconnect(2);
6617 do_test_drop_messages_peer_disconnect(3);
6618 }
6620 #[test]
6621 fn test_drop_messages_peer_disconnect_b() {
6622 do_test_drop_messages_peer_disconnect(4);
6623 do_test_drop_messages_peer_disconnect(5);
6624 do_test_drop_messages_peer_disconnect(6);
6625 }
6627 #[test]
6628 fn test_funding_peer_disconnect() {
6629 // Test that we can lock in our funding tx while disconnected
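// Added note: both funding confirmations below happen while the peers are disconnected, so
// the resulting funding_locked messages (and, once both sides are locked, the
// announcement_signatures from nodes[1]) can only be delivered by the reconnect_nodes calls.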
6630 let nodes = create_network(2);
6631 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
6633 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6634 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6636 confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
6637 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
6638 assert_eq!(events_1.len(), 1);
6639 match events_1[0] {
6640 MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
6641 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
6642 },
6643 _ => panic!("Unexpected event"),
6644 }
6646 reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6648 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6649 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6651 confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
6652 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6653 assert_eq!(events_2.len(), 2);
6654 match events_2[0] {
6655 MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
6656 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6657 },
6658 _ => panic!("Unexpected event"),
6659 }
6660 match events_2[1] {
6661 MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => {
6662 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6663 },
6664 _ => panic!("Unexpected event"),
6665 }
6667 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6669 // TODO: We shouldn't need to manually pass list_usable_channels here once we support
6670 // rebroadcasting announcement_signatures upon reconnect.
6672 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6673 let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
6674 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
6675 }
6677 #[test]
6678 fn test_drop_messages_peer_disconnect_dual_htlc() {
6679 // Test that we can handle reconnecting when both sides of a channel have pending
6680 // commitment_updates when we disconnect.
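// Added note: nodes[1]'s update_fulfill_htlc/commitment_signed get delivered before the
// disconnect, so after channel_reestablish only nodes[0] has a commitment update to
// retransmit, and it must replay its commitment update before its revoke_and_ack; hence
// the RAACommitmentOrder::CommitmentFirst assertion below while bs_resp carries no updates.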
6681 let mut nodes = create_network(2);
6682 create_announced_chan_between_nodes(&nodes, 0, 1);
6684 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6686 // Now try to send a second payment which will fail to send
6687 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6688 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
6690 nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
6691 check_added_monitors!(nodes[0], 1);
6693 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
6694 assert_eq!(events_1.len(), 1);
6695 match events_1[0] {
6696 MessageSendEvent::UpdateHTLCs { .. } => {},
6697 _ => panic!("Unexpected event"),
6698 }
6700 assert!(nodes[1].node.claim_funds(payment_preimage_1));
6701 check_added_monitors!(nodes[1], 1);
6703 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6704 assert_eq!(events_2.len(), 1);
6705 match events_2[0] {
6706 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6707 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6708 assert!(update_add_htlcs.is_empty());
6709 assert_eq!(update_fulfill_htlcs.len(), 1);
6710 assert!(update_fail_htlcs.is_empty());
6711 assert!(update_fail_malformed_htlcs.is_empty());
6712 assert!(update_fee.is_none());
6714 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
6715 let events_3 = nodes[0].node.get_and_clear_pending_events();
6716 assert_eq!(events_3.len(), 1);
6717 match events_3[0] {
6718 Event::PaymentSent { ref payment_preimage } => {
6719 assert_eq!(*payment_preimage, payment_preimage_1);
6720 },
6721 _ => panic!("Unexpected event"),
6722 }
6724 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
6725 let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
6726 // No commitment_signed so get_event_msg's assert(len == 1) passes
6727 check_added_monitors!(nodes[0], 1);
6728 },
6729 _ => panic!("Unexpected event"),
6730 }
6732 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6733 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6735 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
6736 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6737 assert_eq!(reestablish_1.len(), 1);
6738 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
6739 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6740 assert_eq!(reestablish_2.len(), 1);
6742 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
6743 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6744 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
6745 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6747 assert!(as_resp.0.is_none());
6748 assert!(bs_resp.0.is_none());
6750 assert!(bs_resp.1.is_none());
6751 assert!(bs_resp.2.is_none());
6753 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
6755 assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
6756 assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
6757 assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
6758 assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
6759 assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
6760 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
6761 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
6762 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6763 // No commitment_signed so get_event_msg's assert(len == 1) passes
6764 check_added_monitors!(nodes[1], 1);
6766 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap();
6767 let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6768 assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
6769 assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
6770 assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
6771 assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
6772 assert!(bs_second_commitment_signed.update_fee.is_none());
6773 check_added_monitors!(nodes[1], 1);
6775 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
6776 let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6777 assert!(as_commitment_signed.update_add_htlcs.is_empty());
6778 assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
6779 assert!(as_commitment_signed.update_fail_htlcs.is_empty());
6780 assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
6781 assert!(as_commitment_signed.update_fee.is_none());
6782 check_added_monitors!(nodes[0], 1);
6784 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
6785 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
6786 // No commitment_signed so get_event_msg's assert(len == 1) passes
6787 check_added_monitors!(nodes[0], 1);
6789 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
6790 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6791 // No commitment_signed so get_event_msg's assert(len == 1) passes
6792 check_added_monitors!(nodes[1], 1);
6794 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
6795 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
6796 check_added_monitors!(nodes[1], 1);
6798 let events_4 = nodes[1].node.get_and_clear_pending_events();
6799 assert_eq!(events_4.len(), 1);
6800 match events_4[0] {
6801 Event::PendingHTLCsForwardable { .. } => { },
6802 _ => panic!("Unexpected event"),
6803 }
6805 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
6806 nodes[1].node.process_pending_htlc_forwards();
6808 let events_5 = nodes[1].node.get_and_clear_pending_events();
6809 assert_eq!(events_5.len(), 1);
6810 match events_5[0] {
6811 Event::PaymentReceived { ref payment_hash, amt: _ } => {
6812 assert_eq!(payment_hash_2, *payment_hash);
6813 },
6814 _ => panic!("Unexpected event"),
6815 }
6817 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
6818 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6819 check_added_monitors!(nodes[0], 1);
6821 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
6822 }
6824 #[test]
6825 fn test_simple_monitor_permanent_update_fail() {
6826 // Test that we handle a simple permanent monitor update failure
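// Added note: PermanentFailure is unrecoverable, so rather than freezing the channel the
// node immediately force-closes it; the only message event we expect here is the resulting
// BroadcastChannelUpdate, and afterwards no channels remain open.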
6827 let mut nodes = create_network(2);
6828 create_announced_chan_between_nodes(&nodes, 0, 1);
6830 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6831 let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
6833 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
6834 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
6835 check_added_monitors!(nodes[0], 1);
6837 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
6838 assert_eq!(events_1.len(), 1);
6839 match events_1[0] {
6840 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
6841 _ => panic!("Unexpected event"),
6842 }
6844 // TODO: Once we hit the chain with the failure transaction we should check that we get a
6845 // PaymentFailed event
6847 assert_eq!(nodes[0].node.list_channels().len(), 0);
6848 }
6850 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
6851 // Test that we can recover from a simple temporary monitor update failure optionally with
6852 // a disconnect in between
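// Added note: on TemporaryFailure the channel is frozen rather than closed: the send is
// held back (no events or messages) until test_restore_channel_monitor below simulates the
// monitor update completing, at which point the queued HTLC goes out normally.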
6853 let mut nodes = create_network(2);
6854 create_announced_chan_between_nodes(&nodes, 0, 1);
6856 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6857 let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
6859 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
6860 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); }
6861 check_added_monitors!(nodes[0], 1);
6863 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
6864 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6865 assert_eq!(nodes[0].node.list_channels().len(), 1);
6867 if disconnect {
6868 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6869 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6870 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6871 }
6873 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
6874 nodes[0].node.test_restore_channel_monitor();
6875 check_added_monitors!(nodes[0], 1);
6877 let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
6878 assert_eq!(events_2.len(), 1);
6879 let payment_event = SendEvent::from_event(events_2.pop().unwrap());
6880 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
6881 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
6882 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6884 expect_pending_htlcs_forwardable!(nodes[1]);
6886 let events_3 = nodes[1].node.get_and_clear_pending_events();
6887 assert_eq!(events_3.len(), 1);
6888 match events_3[0] {
6889 Event::PaymentReceived { ref payment_hash, amt } => {
6890 assert_eq!(payment_hash_1, *payment_hash);
6891 assert_eq!(amt, 1000000);
6892 },
6893 _ => panic!("Unexpected event"),
6894 }
6896 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
6898 // Now set it to failed again...
6899 let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
6900 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
6901 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
6902 check_added_monitors!(nodes[0], 1);
6904 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
6905 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6906 assert_eq!(nodes[0].node.list_channels().len(), 1);
6908 if disconnect {
6909 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6910 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6911 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
6912 }
6914 // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
6915 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
6916 nodes[0].node.test_restore_channel_monitor();
6917 check_added_monitors!(nodes[0], 1);
6919 let events_5 = nodes[0].node.get_and_clear_pending_msg_events();
6920 assert_eq!(events_5.len(), 1);
6921 match events_5[0] {
6922 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
6923 _ => panic!("Unexpected event"),
6924 }
6926 // TODO: Once we hit the chain with the failure transaction we should check that we get a
6927 // PaymentFailed event
6929 assert_eq!(nodes[0].node.list_channels().len(), 0);
6930 }
6932 #[test]
6933 fn test_simple_monitor_temporary_update_fail() {
6934 do_test_simple_monitor_temporary_update_fail(false);
6935 do_test_simple_monitor_temporary_update_fail(true);
6936 }
6938 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
6939 let disconnect_flags = 8 | 16;
6941 // Test that we can recover from a temporary monitor update failure with some in-flight
6942 // HTLCs going on at the same time potentially with some disconnection thrown in.
6943 // * First we route a payment, then get a temporary monitor update failure when trying to
6944 // route a second payment. We then claim the first payment.
6945 // * If disconnect_count is set, we will disconnect at this point (which is likely as
6946 // TemporaryFailure often indicates a network disconnect which resulted in failing to update
6947 // the ChannelMonitor on a watchtower).
6948 // * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
6949 // immediately, otherwise we wait until after the reconnect and deliver them via the
6950 // channel_reestablish processing (ie disconnect_count & 16 makes no sense if
6951 // disconnect_count & !disconnect_flags is 0).
6952 // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
6953 // through message sending, potentially disconnect/reconnecting multiple times based on
6954 // disconnect_count, to get the update_fulfill_htlc through.
6955 // * We then walk through more message exchanges to get the original update_add_htlc
6956 // through, swapping message ordering based on disconnect_count & 8 and optionally
6957 // disconnect/reconnecting based on disconnect_count.
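// For illustration only (added note; the let-bindings below are not part of the test),
// the bits of disconnect_count decompose as:
// let num_disconnects = disconnect_count & !disconnect_flags; // low bits: disconnect/reconnect rounds
// let initial_raa_first = (disconnect_count & 8) != 0;        // swaps the RAA/commitment replay order
// let fulfill_via_reestablish = (disconnect_count & 16) != 0; // fulfill/CS delivered only via channel_reestablish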
6958 let mut nodes = create_network(2);
6959 create_announced_chan_between_nodes(&nodes, 0, 1);
6961 let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
6963 // Now try to send a second payment which will fail to send
6964 let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
6965 let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
6967 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
6968 if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
6969 check_added_monitors!(nodes[0], 1);
6971 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
6972 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6973 assert_eq!(nodes[0].node.list_channels().len(), 1);
6975 // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
6976 // but nodes[0] won't respond since it is frozen.
6977 assert!(nodes[1].node.claim_funds(payment_preimage_1));
6978 check_added_monitors!(nodes[1], 1);
6979 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
6980 assert_eq!(events_2.len(), 1);
6981 let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
6982 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
6983 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
6984 assert!(update_add_htlcs.is_empty());
6985 assert_eq!(update_fulfill_htlcs.len(), 1);
6986 assert!(update_fail_htlcs.is_empty());
6987 assert!(update_fail_malformed_htlcs.is_empty());
6988 assert!(update_fee.is_none());
6990 if (disconnect_count & 16) == 0 {
6991 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
6992 let events_3 = nodes[0].node.get_and_clear_pending_events();
6993 assert_eq!(events_3.len(), 1);
6994 match events_3[0] {
6995 Event::PaymentSent { ref payment_preimage } => {
6996 assert_eq!(*payment_preimage, payment_preimage_1);
6997 },
6998 _ => panic!("Unexpected event"),
6999 }
7001 if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
7002 assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
7003 } else { panic!(); }
7004 }
7006 (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
7007 },
7008 _ => panic!("Unexpected event"),
7009 };
7011 if disconnect_count & !disconnect_flags > 0 {
7012 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7013 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7014 }
7016 // Now fix monitor updating...
7017 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
7018 nodes[0].node.test_restore_channel_monitor();
7019 check_added_monitors!(nodes[0], 1);
7021 macro_rules! disconnect_reconnect_peers { () => { {
7022 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7023 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7025 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
7026 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7027 assert_eq!(reestablish_1.len(), 1);
7028 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
7029 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7030 assert_eq!(reestablish_2.len(), 1);
7032 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
7033 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7034 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
7035 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7037 assert!(as_resp.0.is_none());
7038 assert!(bs_resp.0.is_none());
7040 (reestablish_1, reestablish_2, as_resp, bs_resp)
7041 } } }
7043 let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
7044 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
7045 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7047 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
7048 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7049 assert_eq!(reestablish_1.len(), 1);
7050 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
7051 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7052 assert_eq!(reestablish_2.len(), 1);
7054 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
7055 check_added_monitors!(nodes[0], 0);
7056 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7057 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
7058 check_added_monitors!(nodes[1], 0);
7059 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7061 assert!(as_resp.0.is_none());
7062 assert!(bs_resp.0.is_none());
7064 assert!(bs_resp.1.is_none());
7065 if (disconnect_count & 16) == 0 {
7066 assert!(bs_resp.2.is_none());
7068 assert!(as_resp.1.is_some());
7069 assert!(as_resp.2.is_some());
7070 assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
7071 } else {
7072 assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
7073 assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
7074 assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
7075 assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
7076 assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
7077 assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
7079 assert!(as_resp.1.is_none());
7081 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
7082 let events_3 = nodes[0].node.get_and_clear_pending_events();
7083 assert_eq!(events_3.len(), 1);
7084 match events_3[0] {
7085 Event::PaymentSent { ref payment_preimage } => {
7086 assert_eq!(*payment_preimage, payment_preimage_1);
7087 },
7088 _ => panic!("Unexpected event"),
7089 }
7091 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
7092 let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
7093 // No commitment_signed so get_event_msg's assert(len == 1) passes
7094 check_added_monitors!(nodes[0], 1);
7096 as_resp.1 = Some(as_resp_raa);
7097 bs_resp.2 = None;
7098 }
7100 if disconnect_count & !disconnect_flags > 1 {
7101 let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
7103 if (disconnect_count & 16) == 0 {
7104 assert!(reestablish_1 == second_reestablish_1);
7105 assert!(reestablish_2 == second_reestablish_2);
7106 }
7107 assert!(as_resp == second_as_resp);
7108 assert!(bs_resp == second_bs_resp);
7109 }
7111 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
7112 } else {
7113 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
7114 assert_eq!(events_4.len(), 2);
7115 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
7116 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
7117 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
7118 msg.clone()
7119 },
7120 _ => panic!("Unexpected event"),
7121 })
7122 };
7124 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
7126 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
7127 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
7128 let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
7129 // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
7130 check_added_monitors!(nodes[1], 1);
7132 if disconnect_count & !disconnect_flags > 2 {
7133 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7135 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
7136 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
7138 assert!(as_resp.2.is_none());
7139 assert!(bs_resp.2.is_none());
7140 }
7142 let as_commitment_update;
7143 let bs_second_commitment_update;
7145 macro_rules! handle_bs_raa { () => {
7146 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
7147 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7148 assert!(as_commitment_update.update_add_htlcs.is_empty());
7149 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
7150 assert!(as_commitment_update.update_fail_htlcs.is_empty());
7151 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
7152 assert!(as_commitment_update.update_fee.is_none());
7153 check_added_monitors!(nodes[0], 1);
7154 } }
7156 macro_rules! handle_initial_raa { () => {
7157 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap();
7158 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7159 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
7160 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
7161 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
7162 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
7163 assert!(bs_second_commitment_update.update_fee.is_none());
7164 check_added_monitors!(nodes[1], 1);
7165 } }
7167 if (disconnect_count & 8) == 0 {
7168 handle_bs_raa!();
7170 if disconnect_count & !disconnect_flags > 3 {
7171 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7173 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
7174 assert!(bs_resp.1.is_none());
7176 assert!(as_resp.2.unwrap() == as_commitment_update);
7177 assert!(bs_resp.2.is_none());
7179 assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
7180 }
7182 handle_initial_raa!();
7184 if disconnect_count & !disconnect_flags > 4 {
7185 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7187 assert!(as_resp.1.is_none());
7188 assert!(bs_resp.1.is_none());
7190 assert!(as_resp.2.unwrap() == as_commitment_update);
7191 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
7192 }
7193 } else {
7194 handle_initial_raa!();
7196 if disconnect_count & !disconnect_flags > 3 {
7197 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7199 assert!(as_resp.1.is_none());
7200 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
7202 assert!(as_resp.2.is_none());
7203 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
7205 assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
7206 }
7208 handle_bs_raa!();
7210 if disconnect_count & !disconnect_flags > 4 {
7211 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
7213 assert!(as_resp.1.is_none());
7214 assert!(bs_resp.1.is_none());
7216 assert!(as_resp.2.unwrap() == as_commitment_update);
7217 assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
7218 }
7219 }
7221 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
7222 let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
7223 // No commitment_signed so get_event_msg's assert(len == 1) passes
7224 check_added_monitors!(nodes[0], 1);
7226 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
7227 let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
7228 // No commitment_signed so get_event_msg's assert(len == 1) passes
7229 check_added_monitors!(nodes[1], 1);
7231 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
7232 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7233 check_added_monitors!(nodes[1], 1);
7235 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
7236 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7237 check_added_monitors!(nodes[0], 1);
7239 expect_pending_htlcs_forwardable!(nodes[1]);
7241 let events_5 = nodes[1].node.get_and_clear_pending_events();
7242 assert_eq!(events_5.len(), 1);
7243 match events_5[0] {
7244 Event::PaymentReceived { ref payment_hash, amt } => {
7245 assert_eq!(payment_hash_2, *payment_hash);
7246 assert_eq!(amt, 1000000);
7247 },
7248 _ => panic!("Unexpected event"),
7249 }
7251 claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
7252 }
7254 #[test]
7255 fn test_monitor_temporary_update_fail_a() {
7256 do_test_monitor_temporary_update_fail(0);
7257 do_test_monitor_temporary_update_fail(1);
7258 do_test_monitor_temporary_update_fail(2);
7259 do_test_monitor_temporary_update_fail(3);
7260 do_test_monitor_temporary_update_fail(4);
7261 do_test_monitor_temporary_update_fail(5);
7262 }
7264 #[test]
7265 fn test_monitor_temporary_update_fail_b() {
7266 do_test_monitor_temporary_update_fail(2 | 8);
7267 do_test_monitor_temporary_update_fail(3 | 8);
7268 do_test_monitor_temporary_update_fail(4 | 8);
7269 do_test_monitor_temporary_update_fail(5 | 8);
7270 }
7272 #[test]
7273 fn test_monitor_temporary_update_fail_c() {
7274 do_test_monitor_temporary_update_fail(1 | 16);
7275 do_test_monitor_temporary_update_fail(2 | 16);
7276 do_test_monitor_temporary_update_fail(3 | 16);
7277 do_test_monitor_temporary_update_fail(2 | 8 | 16);
7278 do_test_monitor_temporary_update_fail(3 | 8 | 16);
7279 }
7281 #[test]
7282 fn test_invalid_channel_announcement() {
7283 // Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
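// Added note: the announcements built below are fully signed and valid except for the field
// under test; the two failure cases substitute the mainnet genesis chain_hash (these nodes
// run with Network::Testnet) and then an arbitrary unknown chain_hash, both of which the
// router must reject.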
7284 let secp_ctx = Secp256k1::new();
7285 let nodes = create_network(2);
7287 let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
7289 let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
7290 let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
7291 let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
7292 let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
7294 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
7296 let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
7297 let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
7299 let as_network_key = nodes[0].node.get_our_node_id();
7300 let bs_network_key = nodes[1].node.get_our_node_id();
7302 let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
7304 let mut chan_announcement;
7306 macro_rules! dummy_unsigned_msg {
7307 () => {
7308 msgs::UnsignedChannelAnnouncement {
7309 features: msgs::GlobalFeatures::new(),
7310 chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
7311 short_channel_id: as_chan.get_short_channel_id().unwrap(),
7312 node_id_1: if were_node_one { as_network_key } else { bs_network_key },
7313 node_id_2: if were_node_one { bs_network_key } else { as_network_key },
7314 bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
7315 bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
7316 excess_data: Vec::new(),
7317 }
7318 }
7319 }
7321 macro_rules! sign_msg {
7322 ($unsigned_msg: expr) => {
7323 let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
7324 let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
7325 let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
7326 let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key);
7327 let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key);
7328 chan_announcement = msgs::ChannelAnnouncement {
7329 node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
7330 node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
7331 bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
7332 bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
7333 contents: $unsigned_msg
7334 };
7335 }
7336 }
7338 let unsigned_msg = dummy_unsigned_msg!();
7339 sign_msg!(unsigned_msg);
7340 assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
7341 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
7343 // Configured with Network::Testnet
7344 let mut unsigned_msg = dummy_unsigned_msg!();
7345 unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
7346 sign_msg!(unsigned_msg);
7347 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
7349 let mut unsigned_msg = dummy_unsigned_msg!();
7350 unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
7351 sign_msg!(unsigned_msg);
7352 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
7353 }
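// Added note: VecWriter is a minimal in-memory util::ser::Writer for the serialization
// round-trip tests below; write_all appends to the inner Vec and size_hint merely
// pre-reserves capacity.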
7355 struct VecWriter(Vec<u8>);
7356 impl Writer for VecWriter {
7357 fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
7358 self.0.extend_from_slice(buf);
7359 Ok(())
7360 }
7361 fn size_hint(&mut self, size: usize) {
7362 self.0.reserve_exact(size);
7363 }
7364 }
7366 #[test]
7367 fn test_no_txn_manager_serialize_deserialize() {
7368 let mut nodes = create_network(2);
7370 let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
7372 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7374 let nodes_0_serialized = nodes[0].node.encode();
7375 let mut chan_0_monitor_serialized = VecWriter(Vec::new());
7376 nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
7378 nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
7379 let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
7380 let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
7381 assert!(chan_0_monitor_read.is_empty());
7383 let mut nodes_0_read = &nodes_0_serialized[..];
7384 let config = UserConfig::new();
7385 let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
7386 let (_, nodes_0_deserialized) = {
7387 let mut channel_monitors = HashMap::new();
7388 channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
7389 <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
7390 default_config: config,
7391 keys_manager,
7392 fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
7393 monitor: nodes[0].chan_monitor.clone(),
7394 chain_monitor: nodes[0].chain_monitor.clone(),
7395 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
7396 logger: Arc::new(test_utils::TestLogger::new()),
7397 channel_monitors: &channel_monitors,
7398 }).unwrap()
7399 };
7400 assert!(nodes_0_read.is_empty());
7402 assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
7403 nodes[0].node = Arc::new(nodes_0_deserialized);
7404 let nodes_0_as_listener: Arc<ChainListener> = nodes[0].node.clone();
7405 nodes[0].chain_monitor.register_listener(Arc::downgrade(&nodes_0_as_listener));
7406 assert_eq!(nodes[0].node.list_channels().len(), 1);
7407 check_added_monitors!(nodes[0], 1);
7409 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
7410 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7411 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
7412 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7414 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
7415 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
7416 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
7417 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7419 let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
7420 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
7421 for node in nodes.iter() {
7422 assert!(node.router.handle_channel_announcement(&announcement).unwrap());
7423 node.router.handle_channel_update(&as_update).unwrap();
7424 node.router.handle_channel_update(&bs_update).unwrap();
7425 }
7427 send_payment(&nodes[0], &[&nodes[1]], 1000000);
7428 }
7430 #[test]
7431 fn test_simple_manager_serialize_deserialize() {
7432 let mut nodes = create_network(2);
7433 create_announced_chan_between_nodes(&nodes, 0, 1);
7435 let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7436 let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7438 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7440 let nodes_0_serialized = nodes[0].node.encode();
7441 let mut chan_0_monitor_serialized = VecWriter(Vec::new());
7442 nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
7444 nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
7445 let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
7446 let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
7447 assert!(chan_0_monitor_read.is_empty());
7449 let mut nodes_0_read = &nodes_0_serialized[..];
7450 let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
7451 let (_, nodes_0_deserialized) = {
7452 let mut channel_monitors = HashMap::new();
7453 channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
7454 <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
7455 default_config: UserConfig::new(),
7456 keys_manager,
7457 fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
7458 monitor: nodes[0].chan_monitor.clone(),
7459 chain_monitor: nodes[0].chain_monitor.clone(),
7460 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
7461 logger: Arc::new(test_utils::TestLogger::new()),
7462 channel_monitors: &channel_monitors,
7463 }).unwrap()
7464 };
7465 assert!(nodes_0_read.is_empty());
7467 assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
7468 nodes[0].node = Arc::new(nodes_0_deserialized);
7469 check_added_monitors!(nodes[0], 1);
7471 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
7473 fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
7474 claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
7475 }
7477 #[test]
7478 fn test_manager_serialize_deserialize_inconsistent_monitor() {
7479 // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
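// Added note: the ChannelManager is snapshotted *before* the last payment to nodes[3] while
// the ChannelMonitors are snapshotted after it, so on reload the monitor for that channel
// is ahead of the manager. The expected recovery is a unilateral close of the nodes[3]
// channel (the commitment and HTLC transactions checked below), while the other channels
// reestablish cleanly.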
7480 let mut nodes = create_network(4);
7481 create_announced_chan_between_nodes(&nodes, 0, 1);
7482 create_announced_chan_between_nodes(&nodes, 2, 0);
7483 let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3);
7485 let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
7487 // Serialize the ChannelManager here, but the monitor we keep up-to-date
7488 let nodes_0_serialized = nodes[0].node.encode();
7490 route_payment(&nodes[0], &[&nodes[3]], 1000000);
7491 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7492 nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7493 nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7495 // Now serialize the ChannelMonitors (which are now out-of-sync with the ChannelManager
7496 // for the channel w/ nodes[3])
7497 let mut node_0_monitors_serialized = Vec::new();
7498 for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
7499 let mut writer = VecWriter(Vec::new());
7500 monitor.1.write_for_disk(&mut writer).unwrap();
7501 node_0_monitors_serialized.push(writer.0);
7502 }
7504 nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new())));
7505 let mut node_0_monitors = Vec::new();
7506 for serialized in node_0_monitors_serialized.iter() {
7507 let mut read = &serialized[..];
7508 let (_, monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut read, Arc::new(test_utils::TestLogger::new())).unwrap();
7509 assert!(read.is_empty());
7510 node_0_monitors.push(monitor);
7511 }
7513 let mut nodes_0_read = &nodes_0_serialized[..];
7514 let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
7515 let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
7516 default_config: UserConfig::new(),
7517 keys_manager,
7518 fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
7519 monitor: nodes[0].chan_monitor.clone(),
7520 chain_monitor: nodes[0].chain_monitor.clone(),
7521 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
7522 logger: Arc::new(test_utils::TestLogger::new()),
7523 channel_monitors: &node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().unwrap(), monitor) }).collect(),
7524 }).unwrap();
7525 assert!(nodes_0_read.is_empty());
7527 { // Channel close should result in a commitment tx and an HTLC tx
7528 let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
7529 assert_eq!(txn.len(), 2);
7530 assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
7531 assert_eq!(txn[1].input[0].previous_output.txid, txn[0].txid());
7532 }
7534 for monitor in node_0_monitors.drain(..) {
7535 assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
7536 check_added_monitors!(nodes[0], 1);
7537 }
7538 nodes[0].node = Arc::new(nodes_0_deserialized);
7540 // nodes[1] and nodes[2] have no lost state with nodes[0]...
7541 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
7542 reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
7543 //... and we can even still claim the payment!
7544 claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
7546 nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
7547 let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
7548 nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
7549 if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
7550 assert_eq!(msg.channel_id, channel_id);
7551 } else { panic!("Unexpected result"); }