1 //! The top-level channel management and payment tracking logic lives here.
3 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
4 //! responsible for tracking which channels are open, which HTLCs are in flight, and reestablishing those
5 //! upon reconnect to the relevant peer(s).
7 //! It does not manage routing logic (see ln::router for that) nor does it manage constructing
8 //! on-chain transactions (it only monitors the chain to watch for any force-closes that might
9 //! imply it needs to fail HTLCs/payments/channels it manages).
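//!
//! A rough construction sketch (assuming `our_network_key` is a valid SecretKey and the other
//! arguments are Arc-wrapped implementations of FeeEstimator, ManyChannelMonitor,
//! ChainWatchInterface, BroadcasterInterface and Logger; none of these helpers are defined here):
//!
//! ```ignore
//! let manager = ChannelManager::new(our_network_key, 10, true, Network::Bitcoin,
//!     fee_estimator, monitor, chain_watcher, tx_broadcaster, logger)?;
//! ```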
11 use bitcoin::blockdata::block::BlockHeader;
12 use bitcoin::blockdata::transaction::Transaction;
13 use bitcoin::blockdata::constants::genesis_block;
14 use bitcoin::network::constants::Network;
15 use bitcoin::network::serialize::BitcoinHash;
16 use bitcoin::util::hash::Sha256dHash;
18 use secp256k1::key::{SecretKey,PublicKey};
19 use secp256k1::{Secp256k1,Message};
20 use secp256k1::ecdh::SharedSecret;
23 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
24 use chain::transaction::OutPoint;
25 use ln::channel::{Channel, ChannelError, ChannelKeys};
26 use ln::channelmonitor::ManyChannelMonitor;
27 use ln::router::{Route,RouteHop};
29 use ln::msgs::{HandleError,ChannelMessageHandler};
30 use util::{byte_utils, events, internal_traits, rng};
31 use util::sha2::Sha256;
32 use util::ser::{Readable, Writeable};
33 use util::chacha20poly1305rfc::ChaCha20;
34 use util::logger::Logger;
35 use util::errors::APIError;
38 use crypto::mac::{Mac,MacResult};
39 use crypto::hmac::Hmac;
40 use crypto::digest::Digest;
41 use crypto::symmetriccipher::SynchronousStreamCipher;
44 use std::collections::HashMap;
45 use std::collections::hash_map;
47 use std::sync::{Mutex,MutexGuard,Arc};
48 use std::sync::atomic::{AtomicUsize, Ordering};
49 use std::time::{Instant,Duration};
51 /// We hold various information about HTLC relay in the HTLC objects in Channel itself:
53 /// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating whether it
54 /// should forward the HTLC (along with the information it will give back to us when it does so), or
55 /// fail the HTLC (with the relevant message for the Channel to hand to the remote peer).
57 /// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
58 /// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
59 /// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
60 /// the HTLC backwards along the relevant path).
61 /// Alternatively, we can fill an outbound HTLC with an HTLCSource::OutboundRoute indicating this is
62 /// our payment, which we can use to decode errors or inform the user that the payment was sent.
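///
/// For intuition, the caller ends up matching on the resulting status roughly like this (a
/// sketch only; `queue_forward` and `fail_back` are hypothetical helpers, not code in this
/// module):
///
/// ```ignore
/// match pending_htlc_status {
///     PendingHTLCStatus::Forward(forward_info) => queue_forward(forward_info),
///     PendingHTLCStatus::Fail(fail_msg) => fail_back(fail_msg),
/// }
/// ```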
63 mod channel_held_info {
65 use ln::router::Route;
66 use secp256k1::key::SecretKey;
67 use secp256k1::ecdh::SharedSecret;
69 /// Stores the info we will need to send when we want to forward an HTLC onwards
70 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
71 pub struct PendingForwardHTLCInfo {
72 pub(super) onion_packet: Option<msgs::OnionPacket>,
73 pub(super) incoming_shared_secret: SharedSecret,
74 pub(super) payment_hash: [u8; 32],
75 pub(super) short_channel_id: u64,
76 pub(super) amt_to_forward: u64,
77 pub(super) outgoing_cltv_value: u32,
80 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
81 pub enum HTLCFailureMsg {
82 Relay(msgs::UpdateFailHTLC),
83 Malformed(msgs::UpdateFailMalformedHTLC),
86 /// Indicates whether we cannot forward an HTLC, or holds the relevant forwarding info if we can
87 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
88 pub enum PendingHTLCStatus {
89 Forward(PendingForwardHTLCInfo),
93 /// Tracks the inbound HTLC which corresponds to an outbound HTLC we forwarded
95 pub struct HTLCPreviousHopData {
96 pub(super) short_channel_id: u64,
97 pub(super) htlc_id: u64,
98 pub(super) incoming_packet_shared_secret: SharedSecret,
101 /// Tracks the source of an outbound HTLC: either a previous hop's inbound HTLC or our own outbound payment
103 pub enum HTLCSource {
104 PreviousHopData(HTLCPreviousHopData),
107 session_priv: SecretKey,
112 pub fn dummy() -> Self {
113 HTLCSource::OutboundRoute {
114 route: Route { hops: Vec::new() },
115 session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
120 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
121 pub(crate) enum HTLCFailReason {
123 err: msgs::OnionErrorPacket,
131 pub(super) use self::channel_held_info::*;
133 struct MsgHandleErrInternal {
134 err: msgs::HandleError,
135 needs_channel_force_close: bool,
137 impl MsgHandleErrInternal {
139 fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
143 action: Some(msgs::ErrorAction::SendErrorMessage {
144 msg: msgs::ErrorMessage {
146 data: err.to_string()
150 needs_channel_force_close: false,
154 fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
158 action: Some(msgs::ErrorAction::SendErrorMessage {
159 msg: msgs::ErrorMessage {
161 data: err.to_string()
165 needs_channel_force_close: true,
169 fn from_maybe_close(err: msgs::HandleError) -> Self {
170 Self { err, needs_channel_force_close: true }
173 fn from_no_close(err: msgs::HandleError) -> Self {
174 Self { err, needs_channel_force_close: false }
177 fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
180 ChannelError::Ignore(msg) => HandleError {
182 action: Some(msgs::ErrorAction::IgnoreError),
184 ChannelError::Close(msg) => HandleError {
186 action: Some(msgs::ErrorAction::SendErrorMessage {
187 msg: msgs::ErrorMessage {
189 data: msg.to_string()
194 needs_channel_force_close: false,
198 fn from_chan_maybe_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
201 ChannelError::Ignore(msg) => HandleError {
203 action: Some(msgs::ErrorAction::IgnoreError),
205 ChannelError::Close(msg) => HandleError {
207 action: Some(msgs::ErrorAction::SendErrorMessage {
208 msg: msgs::ErrorMessage {
210 data: msg.to_string()
215 needs_channel_force_close: true,
220 /// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
221 /// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
222 /// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
223 /// probably increase this significantly.
224 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
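// Illustrative only (not the draw actually used elsewhere): picking such a delay from a random
// u32 `r` could look roughly like
//     MIN_HTLC_RELAY_HOLDING_CELL_MILLIS + r % (4 * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)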
226 struct HTLCForwardInfo {
227 prev_short_channel_id: u64,
229 forward_info: PendingForwardHTLCInfo,
232 struct ChannelHolder {
233 by_id: HashMap<[u8; 32], Channel>,
234 short_to_id: HashMap<u64, [u8; 32]>,
235 next_forward: Instant,
236 /// short channel id -> forward infos. Key of 0 means payments received
237 /// Note that while this is held in the same mutex as the channels themselves, no consistency
238 /// guarantees are made that a channel with the short id here actually exists, nor that the
239 /// short ids in the PendingForwardHTLCInfo do!
240 forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
241 /// Note that while this is held in the same mutex as the channels themselves, no consistency
242 /// guarantees are made about the channels given here actually existing anymore by the time you
244 claimable_htlcs: HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
246 struct MutChannelHolder<'a> {
247 by_id: &'a mut HashMap<[u8; 32], Channel>,
248 short_to_id: &'a mut HashMap<u64, [u8; 32]>,
249 next_forward: &'a mut Instant,
250 forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
251 claimable_htlcs: &'a mut HashMap<[u8; 32], Vec<HTLCPreviousHopData>>,
254 fn borrow_parts(&mut self) -> MutChannelHolder {
256 by_id: &mut self.by_id,
257 short_to_id: &mut self.short_to_id,
258 next_forward: &mut self.next_forward,
259 forward_htlcs: &mut self.forward_htlcs,
260 claimable_htlcs: &mut self.claimable_htlcs,
265 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
266 const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
268 /// Manager which keeps track of a number of channels and sends messages to the appropriate
269 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
271 /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
272 /// to individual Channels.
273 pub struct ChannelManager {
274 genesis_hash: Sha256dHash,
275 fee_estimator: Arc<FeeEstimator>,
276 monitor: Arc<ManyChannelMonitor>,
277 chain_monitor: Arc<ChainWatchInterface>,
278 tx_broadcaster: Arc<BroadcasterInterface>,
280 announce_channels_publicly: bool,
281 fee_proportional_millionths: u32,
282 latest_block_height: AtomicUsize,
283 secp_ctx: Secp256k1<secp256k1::All>,
285 channel_state: Mutex<ChannelHolder>,
286 our_network_key: SecretKey,
288 pending_events: Mutex<Vec<events::Event>>,
293 const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
295 macro_rules! secp_call {
296 ( $res: expr, $err: expr ) => {
299 Err(_) => return Err($err),
306 shared_secret: SharedSecret,
308 blinding_factor: [u8; 32],
309 ephemeral_pubkey: PublicKey,
314 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
315 pub struct ChannelDetails {
316 /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes;
317 /// thereafter this is the funding transaction's txid XORed with the funding output index).
318 /// Note that this means this value is *not* persistent - it can change once during the
319 /// lifetime of the channel.
320 pub channel_id: [u8; 32],
321 /// The position of the funding transaction in the chain. None if the funding transaction has
322 /// not yet been confirmed and the channel fully opened.
323 pub short_channel_id: Option<u64>,
324 /// The node_id of our counterparty
325 pub remote_network_id: PublicKey,
326 /// The value, in satoshis, of this channel as appears in the funding output
327 pub channel_value_satoshis: u64,
328 /// The user_id passed in to create_channel, or 0 if the channel was inbound.
332 impl ChannelManager {
333 /// Constructs a new ChannelManager to hold several channels and route between them.
335 /// This is the main "logic hub" for all channel-related actions, and implements
336 /// ChannelMessageHandler.
338 /// fee_proportional_millionths is an optional fee to charge any payments routed through us.
339 /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
341 /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
342 pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
343 let secp_ctx = Secp256k1::new();
345 let res = Arc::new(ChannelManager {
346 genesis_hash: genesis_block(network).header.bitcoin_hash(),
347 fee_estimator: feeest.clone(),
348 monitor: monitor.clone(),
352 announce_channels_publicly,
353 fee_proportional_millionths,
354 latest_block_height: AtomicUsize::new(0), //TODO: Get an init value (generally need to replay recent chain on chain_monitor registration)
357 channel_state: Mutex::new(ChannelHolder{
358 by_id: HashMap::new(),
359 short_to_id: HashMap::new(),
360 next_forward: Instant::now(),
361 forward_htlcs: HashMap::new(),
362 claimable_htlcs: HashMap::new(),
366 pending_events: Mutex::new(Vec::new()),
370 let weak_res = Arc::downgrade(&res);
371 res.chain_monitor.register_listener(weak_res);
375 /// Creates a new outbound channel to the given remote node and with the given value.
377 /// user_id will be provided back as user_channel_id in FundingGenerationReady and
378 /// FundingBroadcastSafe events to allow tracking of which events correspond with which
379 /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
380 /// may wish to avoid using 0 for user_id here.
382 /// If successful, will generate a SendOpenChannel event, so you should probably poll
383 /// PeerManager::process_events afterwards.
385 /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is greater than channel_value_satoshis * 1000
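///
/// A hedged usage sketch (assuming `manager` is an initialized ChannelManager and
/// `their_network_key` is a peer's valid PublicKey):
///
/// ```ignore
/// // Open a 100_000-satoshi channel, pushing no msat, tagged with user_id 42:
/// manager.create_channel(their_network_key, 100_000, 0, 42)?;
/// ```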
386 pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
387 let chan_keys = if cfg!(feature = "fuzztarget") {
389 funding_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
390 revocation_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
391 payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
392 delayed_payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
393 htlc_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
394 channel_close_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
395 channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
396 commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
399 let mut key_seed = [0u8; 32];
400 rng::fill_bytes(&mut key_seed);
401 match ChannelKeys::new_from_seed(&key_seed) {
403 Err(_) => panic!("RNG is busted!")
407 let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
408 let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
409 let mut channel_state = self.channel_state.lock().unwrap();
410 match channel_state.by_id.entry(channel.channel_id()) {
411 hash_map::Entry::Occupied(_) => {
412 if cfg!(feature = "fuzztarget") {
413 return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
415 panic!("RNG is bad???");
418 hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
421 let mut events = self.pending_events.lock().unwrap();
422 events.push(events::Event::SendOpenChannel {
423 node_id: their_network_key,
429 /// Gets the list of open channels, in random order. See ChannelDetails field documentation for
430 /// more information.
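///
/// A hedged sketch (assuming `manager` is an initialized ChannelManager):
///
/// ```ignore
/// for chan in manager.list_channels() {
///     println!("{} sat channel with peer {:?}", chan.channel_value_satoshis, chan.remote_network_id);
/// }
/// ```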
431 pub fn list_channels(&self) -> Vec<ChannelDetails> {
432 let channel_state = self.channel_state.lock().unwrap();
433 let mut res = Vec::with_capacity(channel_state.by_id.len());
434 for (channel_id, channel) in channel_state.by_id.iter() {
435 res.push(ChannelDetails {
436 channel_id: (*channel_id).clone(),
437 short_channel_id: channel.get_short_channel_id(),
438 remote_network_id: channel.get_their_node_id(),
439 channel_value_satoshis: channel.get_value_satoshis(),
440 user_id: channel.get_user_id(),
446 /// Gets the list of usable channels, in random order. Useful as an argument to
447 /// Router::get_route to ensure non-announced channels are used.
448 pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
449 let channel_state = self.channel_state.lock().unwrap();
450 let mut res = Vec::with_capacity(channel_state.by_id.len());
451 for (channel_id, channel) in channel_state.by_id.iter() {
452 if channel.is_usable() {
453 res.push(ChannelDetails {
454 channel_id: (*channel_id).clone(),
455 short_channel_id: channel.get_short_channel_id(),
456 remote_network_id: channel.get_their_node_id(),
457 channel_value_satoshis: channel.get_value_satoshis(),
458 user_id: channel.get_user_id(),
465 /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
466 /// will be accepted on the given channel, and after an additional timeout or once all pending
467 /// HTLCs have been resolved, the channel will be closed on chain.
469 /// May generate a SendShutdown event on success, which should be relayed.
470 pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
471 let (mut res, node_id, chan_option) = {
472 let mut channel_state_lock = self.channel_state.lock().unwrap();
473 let channel_state = channel_state_lock.borrow_parts();
474 match channel_state.by_id.entry(channel_id.clone()) {
475 hash_map::Entry::Occupied(mut chan_entry) => {
476 let res = chan_entry.get_mut().get_shutdown()?;
477 if chan_entry.get().is_shutdown() {
478 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
479 channel_state.short_to_id.remove(&short_id);
481 (res, chan_entry.get().get_their_node_id(), Some(chan_entry.remove_entry().1))
482 } else { (res, chan_entry.get().get_their_node_id(), None) }
484 hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
487 for htlc_source in res.1.drain(..) {
488 // unknown_next_peer...I dunno who that is anymore....
489 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
491 let chan_update = if let Some(chan) = chan_option {
492 if let Ok(update) = self.get_channel_update(&chan) {
497 let mut events = self.pending_events.lock().unwrap();
498 if let Some(update) = chan_update {
499 events.push(events::Event::BroadcastChannelUpdate {
503 events.push(events::Event::SendShutdown {
512 fn finish_force_close_channel(&self, shutdown_res: (Vec<Transaction>, Vec<(HTLCSource, [u8; 32])>)) {
513 let (local_txn, mut failed_htlcs) = shutdown_res;
514 for htlc_source in failed_htlcs.drain(..) {
515 // unknown_next_peer...I dunno who that is anymore....
516 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
518 for tx in local_txn {
519 self.tx_broadcaster.broadcast_transaction(&tx);
521 //TODO: We need to have a way where outbound HTLC claims can result in us claiming the
522 //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
523 //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
524 //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
525 //timeouts are hit and our claims confirm).
526 //TODO: In any case, we need to make sure we remove any pending htlc tracking (via
527 //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
530 /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
531 /// the chain and rejecting new HTLCs on the given channel.
532 pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
534 let mut channel_state_lock = self.channel_state.lock().unwrap();
535 let channel_state = channel_state_lock.borrow_parts();
536 if let Some(chan) = channel_state.by_id.remove(channel_id) {
537 if let Some(short_id) = chan.get_short_channel_id() {
538 channel_state.short_to_id.remove(&short_id);
545 self.finish_force_close_channel(chan.force_shutdown());
546 let mut events = self.pending_events.lock().unwrap();
547 if let Ok(update) = self.get_channel_update(&chan) {
548 events.push(events::Event::BroadcastChannelUpdate {
554 /// Force close all channels, immediately broadcasting the latest local commitment transaction
555 /// for each to the chain and rejecting new HTLCs on each.
556 pub fn force_close_all_channels(&self) {
557 for chan in self.list_channels() {
558 self.force_close_channel(&chan.channel_id);
563 fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) {
565 let mut hmac = Hmac::new(Sha256::new(), &[0x72, 0x68, 0x6f]); // rho
566 hmac.input(&shared_secret[..]);
567 let mut res = [0; 32];
568 hmac.raw_result(&mut res);
572 let mut hmac = Hmac::new(Sha256::new(), &[0x6d, 0x75]); // mu
573 hmac.input(&shared_secret[..]);
574 let mut res = [0; 32];
575 hmac.raw_result(&mut res);
581 fn gen_um_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] {
582 let mut hmac = Hmac::new(Sha256::new(), &[0x75, 0x6d]); // um
583 hmac.input(&shared_secret[..]);
584 let mut res = [0; 32];
585 hmac.raw_result(&mut res);
590 fn gen_ammag_from_shared_secret(shared_secret: &SharedSecret) -> [u8; 32] {
591 let mut hmac = Hmac::new(Sha256::new(), &[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
592 hmac.input(&shared_secret[..]);
593 let mut res = [0; 32];
594 hmac.raw_result(&mut res);
598 // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
600 fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(SharedSecret, [u8; 32], PublicKey, &RouteHop)> (secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey, mut callback: FType) -> Result<(), secp256k1::Error> {
601 let mut blinded_priv = session_priv.clone();
602 let mut blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
604 for hop in route.hops.iter() {
605 let shared_secret = SharedSecret::new(secp_ctx, &hop.pubkey, &blinded_priv);
607 let mut sha = Sha256::new();
608 sha.input(&blinded_pub.serialize()[..]);
609 sha.input(&shared_secret[..]);
610 let mut blinding_factor = [0u8; 32];
611 sha.result(&mut blinding_factor);
613 let ephemeral_pubkey = blinded_pub;
615 blinded_priv.mul_assign(secp_ctx, &SecretKey::from_slice(secp_ctx, &blinding_factor)?)?;
616 blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
618 callback(shared_secret, blinding_factor, ephemeral_pubkey, hop);
624 // can only fail if an intermediary hop has an invalid public key or session_priv is invalid
625 fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, secp256k1::Error> {
626 let mut res = Vec::with_capacity(route.hops.len());
628 Self::construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _| {
629 let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
635 blinding_factor: _blinding_factor,
645 /// Returns the hop data, as well as the first-hop value_msat and CLTV value we should send.
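///
/// Worked example with illustrative numbers: for a two-hop route where hop A charges 1_000 msat
/// with cltv_expiry_delta 144, and the final hop B has fee_msat 5_000_000 (the amount the
/// recipient should receive) and cltv_expiry_delta 144, with starting_htlc_offset 600_000:
///
/// ```text
/// payload for B: short_channel_id 0,       amt_to_forward 5_000_000, outgoing_cltv_value 600_144
/// payload for A: short_channel_id B's id,  amt_to_forward 5_000_000, outgoing_cltv_value 600_144
/// returned first-hop values: value_msat 5_001_000, cltv 600_288
/// ```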
646 fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec<msgs::OnionHopData>, u64, u32), APIError> {
647 let mut cur_value_msat = 0u64;
648 let mut cur_cltv = starting_htlc_offset;
649 let mut last_short_channel_id = 0;
650 let mut res: Vec<msgs::OnionHopData> = Vec::with_capacity(route.hops.len());
651 internal_traits::test_no_dealloc::<msgs::OnionHopData>(None);
652 unsafe { res.set_len(route.hops.len()); }
654 for (idx, hop) in route.hops.iter().enumerate().rev() {
655 // First hop gets special values so that it can check, on receipt, that everything is
656 // exactly as it should be (and the next hop isn't trying to probe to find out if we're
657 // the intended recipient).
658 let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
659 let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
660 res[idx] = msgs::OnionHopData {
662 data: msgs::OnionRealm0HopData {
663 short_channel_id: last_short_channel_id,
664 amt_to_forward: value_msat,
665 outgoing_cltv_value: cltv,
669 cur_value_msat += hop.fee_msat;
670 if cur_value_msat >= 21000000 * 100000000 * 1000 {
671 return Err(APIError::RouteError{err: "Channel fees overflowed?!"});
673 cur_cltv += hop.cltv_expiry_delta as u32;
674 if cur_cltv >= 500000000 {
675 return Err(APIError::RouteError{err: "Channel CLTV overflowed?!"});
677 last_short_channel_id = hop.short_channel_id;
679 Ok((res, cur_value_msat, cur_cltv))
683 fn shift_arr_right(arr: &mut [u8; 20*65]) {
685 ptr::copy(arr[0..].as_ptr(), arr[65..].as_mut_ptr(), 19*65);
693 fn xor_bufs(dst: &mut [u8], src: &[u8]) {
694 assert_eq!(dst.len(), src.len());
696 for i in 0..dst.len() {
701 const ZERO: [u8; 21*65] = [0; 21*65];
702 fn construct_onion_packet(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, associated_data: &[u8; 32]) -> msgs::OnionPacket {
703 let mut buf = Vec::with_capacity(21*65);
704 buf.resize(21*65, 0);
707 let iters = payloads.len() - 1;
708 let end_len = iters * 65;
709 let mut res = Vec::with_capacity(end_len);
710 res.resize(end_len, 0);
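// The loop below generates the BOLT 4 "filler": the pseudo-random bytes that the intermediate
// hops' ChaCha20 streams will leave at the tail of the packet as it is forwarded, which the
// sender must pre-compute so the per-hop HMACs calculated further down remain valid.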
712 for (i, keys) in onion_keys.iter().enumerate() {
713 if i == payloads.len() - 1 { continue; }
714 let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
715 chacha.process(&ChannelManager::ZERO, &mut buf); // We don't have a seek function :(
716 ChannelManager::xor_bufs(&mut res[0..(i + 1)*65], &buf[(20 - i)*65..21*65]);
721 let mut packet_data = [0; 20*65];
722 let mut hmac_res = [0; 32];
724 for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
725 ChannelManager::shift_arr_right(&mut packet_data);
726 payload.hmac = hmac_res;
727 packet_data[0..65].copy_from_slice(&payload.encode()[..]);
729 let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
730 chacha.process(&packet_data, &mut buf[0..20*65]);
731 packet_data[..].copy_from_slice(&buf[0..20*65]);
734 packet_data[20*65 - filler.len()..20*65].copy_from_slice(&filler[..]);
737 let mut hmac = Hmac::new(Sha256::new(), &keys.mu);
738 hmac.input(&packet_data);
739 hmac.input(&associated_data[..]);
740 hmac.raw_result(&mut hmac_res);
745 public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
746 hop_data: packet_data,
751 /// Encrypts a failure packet. raw_packet can either be a
752 /// msgs::DecodedOnionErrorPacket.encode() result or a msgs::OnionErrorPacket.data element.
753 fn encrypt_failure_packet(shared_secret: &SharedSecret, raw_packet: &[u8]) -> msgs::OnionErrorPacket {
754 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
756 let mut packet_crypted = Vec::with_capacity(raw_packet.len());
757 packet_crypted.resize(raw_packet.len(), 0);
758 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
759 chacha.process(&raw_packet, &mut packet_crypted[..]);
760 msgs::OnionErrorPacket {
761 data: packet_crypted,
765 fn build_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket {
766 assert!(failure_data.len() <= 256 - 2);
768 let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
771 let mut res = Vec::with_capacity(2 + failure_data.len());
772 res.push(((failure_type >> 8) & 0xff) as u8);
773 res.push(((failure_type >> 0) & 0xff) as u8);
774 res.extend_from_slice(&failure_data[..]);
778 let mut res = Vec::with_capacity(256 - 2 - failure_data.len());
779 res.resize(256 - 2 - failure_data.len(), 0);
782 let mut packet = msgs::DecodedOnionErrorPacket {
784 failuremsg: failuremsg,
788 let mut hmac = Hmac::new(Sha256::new(), &um);
789 hmac.input(&packet.encode()[32..]);
790 hmac.raw_result(&mut packet.hmac);
796 fn build_first_hop_failure_packet(shared_secret: &SharedSecret, failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
797 let failure_packet = ChannelManager::build_failure_packet(shared_secret, failure_type, failure_data);
798 ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
801 fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
802 macro_rules! get_onion_hash {
805 let mut sha = Sha256::new();
806 sha.input(&msg.onion_routing_packet.hop_data);
807 let mut onion_hash = [0; 32];
808 sha.result(&mut onion_hash);
814 if let Err(_) = msg.onion_routing_packet.public_key {
815 log_info!(self, "Failed to accept/forward incoming HTLC with invalid ephemeral pubkey");
816 return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
817 channel_id: msg.channel_id,
818 htlc_id: msg.htlc_id,
819 sha256_of_onion: get_onion_hash!(),
820 failure_code: 0x8000 | 0x4000 | 6,
821 })), self.channel_state.lock().unwrap());
824 let shared_secret = SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key);
825 let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
827 let mut channel_state = None;
828 macro_rules! return_err {
829 ($msg: expr, $err_code: expr, $data: expr) => {
831 log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
832 if channel_state.is_none() {
833 channel_state = Some(self.channel_state.lock().unwrap());
835 return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
836 channel_id: msg.channel_id,
837 htlc_id: msg.htlc_id,
838 reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
839 })), channel_state.unwrap());
844 if msg.onion_routing_packet.version != 0 {
845 //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
846 //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
847 //the hash doesn't really serve any purpose - in the case of hashing all data, the
848 //receiving node would have to brute force to figure out which version was put in the
849 //packet by the node that sent us the message; in the case of hashing the hop_data, the
850 //node knows the HMAC matched, so they already know what is there...
851 return_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4, &get_onion_hash!());
854 let mut hmac = Hmac::new(Sha256::new(), &mu);
855 hmac.input(&msg.onion_routing_packet.hop_data);
856 hmac.input(&msg.payment_hash);
857 if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) {
858 return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!());
861 let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
862 let next_hop_data = {
863 let mut decoded = [0; 65];
864 chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
865 match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
867 let error_code = match err {
868 msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
869 _ => 0x2000 | 2, // Should never happen
871 return_err!("Unable to decode our hop data", error_code, &[0;0]);
877 //TODO: Check that msg.cltv_expiry is within acceptable bounds!
879 let pending_forward_info = if next_hop_data.hmac == [0; 32] {
881 if next_hop_data.data.amt_to_forward != msg.amount_msat {
882 return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
884 if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
885 return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
888 // Note that we could obviously respond immediately with an update_fulfill_htlc
889 // message, however that would leak that we are the recipient of this payment, so
890 // instead we stay symmetric with the forwarding case, only responding (after a
891 // delay) once they've sent us a commitment_signed!
893 PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
895 payment_hash: msg.payment_hash.clone(),
897 incoming_shared_secret: shared_secret.clone(),
898 amt_to_forward: next_hop_data.data.amt_to_forward,
899 outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
902 let mut new_packet_data = [0; 20*65];
903 chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
904 chacha.process(&ChannelManager::ZERO[0..65], &mut new_packet_data[19*65..]);
906 let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
908 let blinding_factor = {
909 let mut sha = Sha256::new();
910 sha.input(&new_pubkey.serialize()[..]);
911 sha.input(&shared_secret[..]);
912 let mut res = [0u8; 32];
913 sha.result(&mut res);
914 match SecretKey::from_slice(&self.secp_ctx, &res) {
916 return_err!("Blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
922 if let Err(_) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
923 return_err!("New blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
926 let outgoing_packet = msgs::OnionPacket {
928 public_key: Ok(new_pubkey),
929 hop_data: new_packet_data,
930 hmac: next_hop_data.hmac.clone(),
933 PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
934 onion_packet: Some(outgoing_packet),
935 payment_hash: msg.payment_hash.clone(),
936 short_channel_id: next_hop_data.data.short_channel_id,
937 incoming_shared_secret: shared_secret.clone(),
938 amt_to_forward: next_hop_data.data.amt_to_forward,
939 outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
943 channel_state = Some(self.channel_state.lock().unwrap());
944 if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
945 if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject the HTLC in the code below
946 let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
947 let forwarding_id = match id_option {
949 return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
951 Some(id) => id.clone(),
953 if let Some((err, code, chan_update)) = {
954 let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
956 Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap()))
958 let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
959 if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward {
960 Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, self.get_channel_update(chan).unwrap()))
962 if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 {
963 Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap()))
970 return_err!(err, code, &chan_update.encode_with_len()[..]);
975 (pending_forward_info, channel_state.unwrap())
978 /// Only fails if the channel does not yet have an assigned short_id
979 fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
980 let short_channel_id = match chan.get_short_channel_id() {
981 None => return Err(HandleError{err: "Channel not yet established", action: None}),
985 let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
987 let unsigned = msgs::UnsignedChannelUpdate {
988 chain_hash: self.genesis_hash,
989 short_channel_id: short_channel_id,
990 timestamp: chan.get_channel_update_count(),
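// Per BOLT 7, bit 0 of flags is the direction (set when our node_id is the lexicographically
// greater of the pair) and bit 1 marks the channel disabled (here: not currently live).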
991 flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
992 cltv_expiry_delta: CLTV_EXPIRY_DELTA,
993 htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
994 fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
995 fee_proportional_millionths: self.fee_proportional_millionths,
996 excess_data: Vec::new(),
999 let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
1000 let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key); //TODO Can we unwrap here?
1002 Ok(msgs::ChannelUpdate {
1008 /// Sends a payment along a given route.
1010 /// Value parameters are provided via the last hop in route, see documentation for RouteHop
1011 /// fields for more info.
1013 /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
1014 /// payment), we don't do anything to stop you! We always try to ensure that if the provided
1015 /// next hop knows the preimage to payment_hash they can claim an additional amount as
1016 /// specified in the last hop in the route! Thus, you should probably do your own
1017 /// payment_preimage tracking (which you should already be doing as they represent "proof of
1018 /// payment") and prevent double-sends yourself.
1020 /// May generate a SendHTLCs event on success, which should be relayed.
1022 /// Raises APIError::RouteError when an invalid route or forwarding parameter
1023 /// (cltv_delta, fee, node public key) is specified.
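///
/// A hedged usage sketch (assuming `manager` is an initialized ChannelManager, `route` came
/// from the Router, and `payment_hash` is the 32-byte hash from the recipient's invoice):
///
/// ```ignore
/// manager.send_payment(route, payment_hash)?;
/// // Then poll PeerManager::process_events so the resulting HTLC messages actually go out.
/// ```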
1024 pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
1025 if route.hops.len() < 1 || route.hops.len() > 20 {
1026 return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
1028 let our_node_id = self.get_our_node_id();
1029 for (idx, hop) in route.hops.iter().enumerate() {
1030 if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
1031 return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
1035 let session_priv = SecretKey::from_slice(&self.secp_ctx, &{
1036 let mut session_key = [0; 32];
1037 rng::fill_bytes(&mut session_key);
1039 }).expect("RNG is bad!");
1041 let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
1043 let onion_keys = secp_call!(ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
1044 APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
1045 let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
1046 let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
1048 let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
1049 let mut channel_state_lock = self.channel_state.lock().unwrap();
1050 let channel_state = channel_state_lock.borrow_parts();
1052 let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
1053 None => return Err(APIError::RouteError{err: "No channel available with first hop!"}),
1054 Some(id) => id.clone(),
1058 let chan = channel_state.by_id.get_mut(&id).unwrap();
1059 if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
1060 return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
1062 if !chan.is_live() {
1063 return Err(APIError::RouteError{err: "Peer for first hop currently disconnected!"});
1065 chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
1066 route: route.clone(),
1067 session_priv: session_priv.clone(),
1068 }, onion_packet).map_err(|he| APIError::RouteError{err: he.err})?
1071 let first_hop_node_id = route.hops.first().unwrap().pubkey;
1074 Some(msgs) => (first_hop_node_id, msgs),
1075 None => return Ok(()),
1079 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1083 let mut events = self.pending_events.lock().unwrap();
1084 events.push(events::Event::UpdateHTLCs {
1085 node_id: first_hop_node_id,
1086 updates: msgs::CommitmentUpdate {
1087 update_add_htlcs: vec![update_add],
1088 update_fulfill_htlcs: Vec::new(),
1089 update_fail_htlcs: Vec::new(),
1090 update_fail_malformed_htlcs: Vec::new(),
1098 /// Call this upon creation of a funding transaction for the given channel.
1100 /// Panics if a funding transaction has already been provided for this channel.
1102 /// May panic if the funding_txo duplicates that of some other channel (note that this should
1103 /// be trivially prevented by using unique funding transaction keys per-channel).
1104 pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
1106 macro_rules! add_pending_event {
1109 let mut pending_events = self.pending_events.lock().unwrap();
1110 pending_events.push($event);
1115 let (chan, msg, chan_monitor) = {
1116 let mut channel_state = self.channel_state.lock().unwrap();
1117 match channel_state.by_id.remove(temporary_channel_id) {
1119 match chan.get_outbound_funding_created(funding_txo) {
1120 Ok(funding_msg) => {
1121 (chan, funding_msg.0, funding_msg.1)
1124 log_error!(self, "Got bad signatures: {}!", e.err);
1125 mem::drop(channel_state);
1126 add_pending_event!(events::Event::HandleError {
1127 node_id: chan.get_their_node_id(),
1136 }; // Release channel lock for install_watch_outpoint call,
1137 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1140 add_pending_event!(events::Event::SendFundingCreated {
1141 node_id: chan.get_their_node_id(),
1145 let mut channel_state = self.channel_state.lock().unwrap();
1146 match channel_state.by_id.entry(chan.channel_id()) {
1147 hash_map::Entry::Occupied(_) => {
1148 panic!("Generated duplicate funding txid?");
1150 hash_map::Entry::Vacant(e) => {
1156 fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
1157 if !chan.should_announce() { return None }
1159 let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
1161 Err(_) => return None, // Only in case of state precondition violations eg channel is closing
1163 let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
1164 let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
1166 Some(msgs::AnnouncementSignatures {
1167 channel_id: chan.channel_id(),
1168 short_channel_id: chan.get_short_channel_id().unwrap(),
1169 node_signature: our_node_sig,
1170 bitcoin_signature: our_bitcoin_sig,
1174 /// Processes HTLCs which are pending, waiting on a random forward delay.
1176 /// Should only really ever be called in response to a PendingHTLCsForwardable event.
1177 /// Will likely generate further events.
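///
/// A hedged sketch of the expected call pattern (event delivery/plumbing is up to the user):
///
/// ```ignore
/// if let events::Event::PendingHTLCsForwardable { .. } = event {
///     // ...after waiting the indicated forwarding delay...
///     manager.process_pending_htlc_forwards();
/// }
/// ```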
1178 pub fn process_pending_htlc_forwards(&self) {
1179 let mut new_events = Vec::new();
1180 let mut failed_forwards = Vec::new();
1182 let mut channel_state_lock = self.channel_state.lock().unwrap();
1183 let channel_state = channel_state_lock.borrow_parts();
1185 if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
1189 for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
1190 if short_chan_id != 0 {
1191 let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
1192 Some(chan_id) => chan_id.clone(),
1194 failed_forwards.reserve(pending_forwards.len());
1195 for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1196 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
1197 short_channel_id: prev_short_channel_id,
1198 htlc_id: prev_htlc_id,
1199 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1201 failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
1206 let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
1208 let mut add_htlc_msgs = Vec::new();
1209 for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1210 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
1211 short_channel_id: prev_short_channel_id,
1212 htlc_id: prev_htlc_id,
1213 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1215 match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
1217 let chan_update = self.get_channel_update(forward_chan).unwrap();
1218 failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
1223 Some(msg) => { add_htlc_msgs.push(msg); },
1225 // Nothing to do here...we're waiting on a remote
1226 // revoke_and_ack before we can add any more HTLCs. The Channel
1227 // will automatically handle building the update_add_htlc and
1228 // commitment_signed messages when we can.
1229 // TODO: Do some kind of timer to set the channel as !is_live()
1230 // as we don't really want others relying on us relaying through
1231 // this channel currently :/.
1238 if !add_htlc_msgs.is_empty() {
1239 let (commitment_msg, monitor) = match forward_chan.send_commitment() {
1242 if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action {
1243 } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action {
1245 panic!("Stated return value requirements in send_commitment() were not met");
1247 //TODO: Handle...this is bad!
1251 new_events.push((Some(monitor), events::Event::UpdateHTLCs {
1252 node_id: forward_chan.get_their_node_id(),
1253 updates: msgs::CommitmentUpdate {
1254 update_add_htlcs: add_htlc_msgs,
1255 update_fulfill_htlcs: Vec::new(),
1256 update_fail_htlcs: Vec::new(),
1257 update_fail_malformed_htlcs: Vec::new(),
1259 commitment_signed: commitment_msg,
1264 for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
1265 let prev_hop_data = HTLCPreviousHopData {
1266 short_channel_id: prev_short_channel_id,
1267 htlc_id: prev_htlc_id,
1268 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
1270 match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
1271 hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
1272 hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
1274 new_events.push((None, events::Event::PaymentReceived {
1275 payment_hash: forward_info.payment_hash,
1276 amt: forward_info.amt_to_forward,
1283 for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
1285 None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
1286 Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
1290 if new_events.is_empty() { return }
1292 new_events.retain(|event| {
1293 if let &Some(ref monitor) = &event.0 {
1294 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) {
1295 unimplemented!(); // but def don't push the event...
1301 let mut events = self.pending_events.lock().unwrap();
1302 events.reserve(new_events.len());
1303 for event in new_events.drain(..) {
1304 events.push(event.1);
1308 /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
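///
/// A hedged sketch (assuming a PaymentReceived event delivered `payment_hash` and
/// `know_preimage_for` is a hypothetical lookup of our own invoices, not part of this file):
///
/// ```ignore
/// if !know_preimage_for(&payment_hash) {
///     manager.fail_htlc_backwards(&payment_hash);
/// }
/// ```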
1309 pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
1310 let mut channel_state = Some(self.channel_state.lock().unwrap());
1311 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
1312 if let Some(mut sources) = removed_source {
1313 for htlc_with_hash in sources.drain(..) {
1314 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1315 self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() });
1321 /// Fails an HTLC backwards to the peer which sent it to us.
1322 /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
1323 /// There are several callsites that do stupid things like loop over a list of payment_hashes
1324 /// to fail and take the channel_state lock for each iteration (as we take ownership and may
1325 /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
1326 /// still-available channels.
1327 fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) {
1329 HTLCSource::OutboundRoute { .. } => {
1330 mem::drop(channel_state);
1332 let mut pending_events = self.pending_events.lock().unwrap();
1333 pending_events.push(events::Event::PaymentFailed {
1334 payment_hash: payment_hash.clone()
1337 HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
1338 let err_packet = match onion_error {
1339 HTLCFailReason::Reason { failure_code, data } => {
1340 let packet = ChannelManager::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
1341 ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
1343 HTLCFailReason::ErrorPacket { err } => {
1344 ChannelManager::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
1348 let (node_id, fail_msgs) = {
1349 let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1350 Some(chan_id) => chan_id.clone(),
1354 let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1355 match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
1356 Ok(msg) => (chan.get_their_node_id(), msg),
1358 //TODO: Do something with e?
1365 Some((msg, commitment_msg, chan_monitor)) => {
1366 mem::drop(channel_state);
1368 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1369 unimplemented!(); // but def don't push the event...
1372 let mut pending_events = self.pending_events.lock().unwrap();
1373 pending_events.push(events::Event::UpdateHTLCs {
1375 updates: msgs::CommitmentUpdate {
1376 update_add_htlcs: Vec::new(),
1377 update_fulfill_htlcs: Vec::new(),
1378 update_fail_htlcs: vec![msg],
1379 update_fail_malformed_htlcs: Vec::new(),
1381 commitment_signed: commitment_msg,
1391 /// Provides a payment preimage in response to a PaymentReceived event, returning true and
1392 /// generating message events for the net layer to claim the payment, if possible. Thus, you
1393 /// should probably kick the net layer to go send messages if this returns true!
1395 /// May panic if called except in response to a PaymentReceived event.
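///
/// A hedged sketch (assuming we stored `payment_preimage` when issuing the invoice whose hash
/// just appeared in a PaymentReceived event):
///
/// ```ignore
/// if manager.claim_funds(payment_preimage) {
///     // Kick the net layer (e.g. PeerManager::process_events) so the fulfill messages go out.
/// }
/// ```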
1396 pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
1397 let mut sha = Sha256::new();
1398 sha.input(&payment_preimage);
1399 let mut payment_hash = [0; 32];
1400 sha.result(&mut payment_hash);
1402 let mut channel_state = Some(self.channel_state.lock().unwrap());
1403 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
1404 if let Some(mut sources) = removed_source {
1405 for htlc_with_hash in sources.drain(..) {
1406 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
1407 self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
1412 fn claim_funds_internal(&self, mut channel_state: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: [u8; 32]) {
1414 HTLCSource::OutboundRoute { .. } => {
1415 mem::drop(channel_state);
1416 let mut pending_events = self.pending_events.lock().unwrap();
1417 pending_events.push(events::Event::PaymentSent {
1421 HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
1422 //TODO: Delay the claimed_funds relaying just like we do outbound relay!
1423 let (node_id, fulfill_msgs) = {
1424 let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
1425 Some(chan_id) => chan_id.clone(),
1427 // TODO: There is probably a channel manager somewhere that needs to
1428 // learn the preimage as the channel already hit the chain and that's
1434 let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
1435 match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
1436 Ok(msg) => (chan.get_their_node_id(), msg),
1438 // TODO: There is probably a channel manager somewhere that needs to
1439 // learn the preimage as the channel may be about to hit the chain.
1440 //TODO: Do something with e?
1446 mem::drop(channel_state);
1447 if let Some(chan_monitor) = fulfill_msgs.1 {
1448 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1449 unimplemented!(); // but def don't push the event...
1453 if let Some((msg, commitment_msg)) = fulfill_msgs.0 {
1454 let mut pending_events = self.pending_events.lock().unwrap();
1455 pending_events.push(events::Event::UpdateHTLCs {
1457 updates: msgs::CommitmentUpdate {
1458 update_add_htlcs: Vec::new(),
1459 update_fulfill_htlcs: vec![msg],
1460 update_fail_htlcs: Vec::new(),
1461 update_fail_malformed_htlcs: Vec::new(),
1463 commitment_signed: commitment_msg,
1471 /// Gets the node_id held by this ChannelManager
1472 pub fn get_our_node_id(&self) -> PublicKey {
1473 PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
1476 /// Used to restore channels to normal operation after a
1477 /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
1479 pub fn test_restore_channel_monitor(&self) {
1483 fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, MsgHandleErrInternal> {
1484 if msg.chain_hash != self.genesis_hash {
1485 return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
1487 let mut channel_state = self.channel_state.lock().unwrap();
1488 if channel_state.by_id.contains_key(&msg.temporary_channel_id) {
1489 return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone()));
1492 let chan_keys = if cfg!(feature = "fuzztarget") {
1494 funding_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]).unwrap(),
1495 revocation_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0]).unwrap(),
1496 payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0]).unwrap(),
1497 delayed_payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0]).unwrap(),
1498 htlc_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0]).unwrap(),
1499 channel_close_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0]).unwrap(),
1500 channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0]).unwrap(),
1501 commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
1504 let mut key_seed = [0u8; 32];
1505 rng::fill_bytes(&mut key_seed);
1506 match ChannelKeys::new_from_seed(&key_seed) {
1508 Err(_) => panic!("RNG is busted!")
1512 let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
1513 .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
1514 let accept_msg = channel.get_accept_channel();
1515 channel_state.by_id.insert(channel.channel_id(), channel);
1519 fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
1520 let (value, output_script, user_id) = {
1521 let mut channel_state = self.channel_state.lock().unwrap();
1522 match channel_state.by_id.get_mut(&msg.temporary_channel_id) {
1524 if chan.get_their_node_id() != *their_node_id {
1525 //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
1526 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1528 chan.accept_channel(&msg)
1529 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
1530 (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
1532 //TODO: same as above
1533 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1536 let mut pending_events = self.pending_events.lock().unwrap();
1537 pending_events.push(events::Event::FundingGenerationReady {
1538 temporary_channel_id: msg.temporary_channel_id,
1539 channel_value_satoshis: value,
1540 output_script: output_script,
1541 user_channel_id: user_id,
1546 fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, MsgHandleErrInternal> {
1547 let (chan, funding_msg, monitor_update) = {
1548 let mut channel_state = self.channel_state.lock().unwrap();
1549 match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
1550 hash_map::Entry::Occupied(mut chan) => {
1551 if chan.get().get_their_node_id() != *their_node_id {
1552 //TODO: here and below MsgHandleErrInternal, #153 case
1553 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
1555 match chan.get_mut().funding_created(msg) {
1556 Ok((funding_msg, monitor_update)) => {
1557 (chan.remove(), funding_msg, monitor_update)
1560 return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1564 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
1566 }; // Release channel lock for install_watch_outpoint call,
1567 // note that this means if the remote end is misbehaving and sends a message for the same
1568 // channel back-to-back with funding_created, we'll end up thinking they sent a message
1569 // for a bogus channel.
1570 if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
1573 let mut channel_state = self.channel_state.lock().unwrap();
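// From here on the channel is tracked under its final channel_id (as set during funding_created)
// rather than the temporary_channel_id.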
1574 match channel_state.by_id.entry(funding_msg.channel_id) {
1575 hash_map::Entry::Occupied(_) => {
1576 return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
1578 hash_map::Entry::Vacant(e) => {
1585 fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
1586 let (funding_txo, user_id, monitor) = {
1587 let mut channel_state = self.channel_state.lock().unwrap();
1588 match channel_state.by_id.get_mut(&msg.channel_id) {
1590 if chan.get_their_node_id() != *their_node_id {
1591 //TODO: here and below MsgHandleErrInternal, #153 case
1592 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1594 let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1595 (chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
1597 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1600 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
1603 let mut pending_events = self.pending_events.lock().unwrap();
1604 pending_events.push(events::Event::FundingBroadcastSafe {
1605 funding_txo: funding_txo,
1606 user_channel_id: user_id,
1611 fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, MsgHandleErrInternal> {
1612 let mut channel_state = self.channel_state.lock().unwrap();
1613 match channel_state.by_id.get_mut(&msg.channel_id) {
1615 if chan.get_their_node_id() != *their_node_id {
1616 //TODO: here and below MsgHandleErrInternal, #153 case
1617 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1619 chan.funding_locked(&msg)
1620 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1621 return Ok(self.get_announcement_sigs(chan));
1623 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1627 fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), MsgHandleErrInternal> {
1628 let (mut res, chan_option) = {
1629 let mut channel_state_lock = self.channel_state.lock().unwrap();
1630 let channel_state = channel_state_lock.borrow_parts();
1632 match channel_state.by_id.entry(msg.channel_id.clone()) {
1633 hash_map::Entry::Occupied(mut chan_entry) => {
1634 if chan_entry.get().get_their_node_id() != *their_node_id {
1635 //TODO: here and below MsgHandleErrInternal, #153 case
1636 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1638 let res = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1639 if chan_entry.get().is_shutdown() {
1640 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1641 channel_state.short_to_id.remove(&short_id);
1643 (res, Some(chan_entry.remove_entry().1))
1644 } else { (res, None) }
1646 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1649 for htlc_source in res.2.drain(..) {
1650 // The channel is going away, so fail these HTLCs backwards with unknown_next_peer (0x4000|10).
1651 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
1653 if let Some(chan) = chan_option {
1654 if let Ok(update) = self.get_channel_update(&chan) {
1655 let mut events = self.pending_events.lock().unwrap();
1656 events.push(events::Event::BroadcastChannelUpdate {
1664 fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, MsgHandleErrInternal> {
1665 let (res, chan_option) = {
1666 let mut channel_state_lock = self.channel_state.lock().unwrap();
1667 let channel_state = channel_state_lock.borrow_parts();
1668 match channel_state.by_id.entry(msg.channel_id.clone()) {
1669 hash_map::Entry::Occupied(mut chan_entry) => {
1670 if chan_entry.get().get_their_node_id() != *their_node_id {
1671 //TODO: here and below MsgHandleErrInternal, #153 case
1672 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1674 let res = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
1675 if res.1.is_some() {
1676 // We're done with this channel, we've got a signed closing transaction and
1677 // will send the closing_signed back to the remote peer upon return. This
1678 // also implies there are no pending HTLCs left on the channel, so we can
1679 // fully delete it from tracking (the channel monitor is still around to
1680 // watch for old state broadcasts)!
1681 if let Some(short_id) = chan_entry.get().get_short_channel_id() {
1682 channel_state.short_to_id.remove(&short_id);
1684 (res, Some(chan_entry.remove_entry().1))
1685 } else { (res, None) }
1687 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1690 if let Some(broadcast_tx) = res.1 {
1691 self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
1693 if let Some(chan) = chan_option {
1694 if let Ok(update) = self.get_channel_update(&chan) {
1695 let mut events = self.pending_events.lock().unwrap();
1696 events.push(events::Event::BroadcastChannelUpdate {
1704 fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
1705 //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
1706 //determine the state of the payment based on our response, whether we forward anything, and
1707 //how long we take to respond. We should take care to avoid allowing such an attack.
1709 //TODO: There exists a further attack where a node may garble the onion data, forward it to
1710 //us repeatedly garbled in different ways, and compare our error messages, which are
1711 //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
1712 //but we should prevent it anyway.
1714 let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
1715 let channel_state = channel_state_lock.borrow_parts();
1717 match channel_state.by_id.get_mut(&msg.channel_id) {
1719 if chan.get_their_node_id() != *their_node_id {
1720 //TODO: here MsgHandleErrInternal, #153 case
1721 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1723 if !chan.is_usable() {
1724 return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Channel not yet available for receiving HTLCs", action: Some(msgs::ErrorAction::IgnoreError)}));
1726 chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
1728 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1732 fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
1733 let mut channel_state = self.channel_state.lock().unwrap();
1734 let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
1736 if chan.get_their_node_id() != *their_node_id {
1737 //TODO: here and below MsgHandleErrInternal, #153 case
1738 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1740 chan.update_fulfill_htlc(&msg)
1741 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone()
1743 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1745 self.claim_funds_internal(channel_state, htlc_source, msg.payment_preimage.clone());
1749 fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, MsgHandleErrInternal> {
1750 let mut channel_state = self.channel_state.lock().unwrap();
1751 let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
1753 if chan.get_their_node_id() != *their_node_id {
1754 //TODO: here and below MsgHandleErrInternal, #153 case
1755 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1757 chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
1758 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
1760 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1764 &HTLCSource::OutboundRoute { ref route, ref session_priv, .. } => {
1765 // Handle packed channel/node updates for passing back to the route handler
1766 let mut packet_decrypted = msg.reason.data.clone();
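// Peel the error onion one layer per route hop: decrypt with each hop's ammag key and stop at
// the first layer whose um-keyed HMAC verifies, which identifies the erring hop.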
1768 Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| {
1769 if res.is_some() { return; }
1771 let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
1773 let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
1774 decryption_tmp.resize(packet_decrypted.len(), 0);
1775 let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
1776 chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
1777 packet_decrypted = decryption_tmp;
1779 if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
1780 if err_packet.failuremsg.len() >= 2 {
1781 let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
1783 let mut hmac = Hmac::new(Sha256::new(), &um);
1784 hmac.input(&err_packet.encode()[32..]);
1785 let mut calc_tag = [0u8; 32];
1786 hmac.raw_result(&mut calc_tag);
1787 if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
1788 const UNKNOWN_CHAN: u16 = 0x4000|10;
1789 const TEMP_CHAN_FAILURE: u16 = 0x4000|7;
1790 match byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]) {
1791 TEMP_CHAN_FAILURE => {
1792 if err_packet.failuremsg.len() >= 4 {
1793 let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[2..4]) as usize;
1794 if err_packet.failuremsg.len() >= 4 + update_len {
1795 if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[4..4 + update_len])) {
1796 res = Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
1804 // No such next-hop. We know this error came from the node at
1805 // this route hop, since the HMAC validated.
1806 res = Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
1807 short_channel_id: route_hop.short_channel_id
1810 _ => {}, //TODO: Enumerate all of these!
1822 fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
1823 let mut channel_state = self.channel_state.lock().unwrap();
1824 match channel_state.by_id.get_mut(&msg.channel_id) {
1826 if chan.get_their_node_id() != *their_node_id {
1827 //TODO: here and below MsgHandleErrInternal, #153 case
1828 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1830 if (msg.failure_code & 0x8000) != 0 {
1831 return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION set", msg.channel_id));
1833 chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
1834 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1837 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1841 fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), MsgHandleErrInternal> {
1842 let (revoke_and_ack, commitment_signed, chan_monitor) = {
1843 let mut channel_state = self.channel_state.lock().unwrap();
1844 match channel_state.by_id.get_mut(&msg.channel_id) {
1846 if chan.get_their_node_id() != *their_node_id {
1847 //TODO: here and below MsgHandleErrInternal, #153 case
1848 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1850 chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
1852 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1855 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1859 Ok((revoke_and_ack, commitment_signed))
1862 fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, MsgHandleErrInternal> {
1863 let ((res, mut pending_forwards, mut pending_failures, chan_monitor), short_channel_id) = {
1864 let mut channel_state = self.channel_state.lock().unwrap();
1865 match channel_state.by_id.get_mut(&msg.channel_id) {
1867 if chan.get_their_node_id() != *their_node_id {
1868 //TODO: here and below MsgHandleErrInternal, #153 case
1869 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1871 (chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
1873 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1876 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
1879 for failure in pending_failures.drain(..) {
1880 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
1883 let mut forward_event = None;
1884 if !pending_forwards.is_empty() {
1885 let mut channel_state = self.channel_state.lock().unwrap();
1886 if channel_state.forward_htlcs.is_empty() {
1887 forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
1888 channel_state.next_forward = forward_event.unwrap();
1890 for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
1891 match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
1892 hash_map::Entry::Occupied(mut entry) => {
1893 entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info });
1895 hash_map::Entry::Vacant(entry) => {
1896 entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info }));
1901 match forward_event {
1903 let mut pending_events = self.pending_events.lock().unwrap();
1904 pending_events.push(events::Event::PendingHTLCsForwardable {
1905 time_forwardable: time
1914 fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
1915 let mut channel_state = self.channel_state.lock().unwrap();
1916 match channel_state.by_id.get_mut(&msg.channel_id) {
1918 if chan.get_their_node_id() != *their_node_id {
1919 //TODO: here and below MsgHandleErrInternal, #153 case
1920 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1922 chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
1924 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1928 fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
1929 let (chan_announcement, chan_update) = {
1930 let mut channel_state = self.channel_state.lock().unwrap();
1931 match channel_state.by_id.get_mut(&msg.channel_id) {
1933 if chan.get_their_node_id() != *their_node_id {
1934 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1936 if !chan.is_usable() {
1937 return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
1940 let our_node_id = self.get_our_node_id();
1941 let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
1942 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1944 let were_node_one = announcement.node_id_1 == our_node_id;
1945 let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
1946 let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id);
1947 secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action);
1948 secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action);
1950 let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
1952 (msgs::ChannelAnnouncement {
1953 node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
1954 node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
1955 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
1956 bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
1957 contents: announcement,
1958 }, self.get_channel_update(chan).unwrap()) // can only fail if we're not in a ready state
1960 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1963 let mut pending_events = self.pending_events.lock().unwrap();
1964 pending_events.push(events::Event::BroadcastChannelAnnouncement { msg: chan_announcement, update_msg: chan_update });
1968 fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), MsgHandleErrInternal> {
1969 let (res, chan_monitor) = {
1970 let mut channel_state = self.channel_state.lock().unwrap();
1971 match channel_state.by_id.get_mut(&msg.channel_id) {
1973 if chan.get_their_node_id() != *their_node_id {
1974 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
1976 let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg)
1977 .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
1978 (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor)
1980 None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
1983 if let Some(monitor) = chan_monitor {
1984 if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
1991 /// Begins the update_fee process. Allowed only on an outbound channel.
1992 /// If successful, will generate a UpdateHTLCs event, so you should probably poll
1993 /// PeerManager::process_events afterwards.
1994 /// Note: This API is likely to change!
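/// # Example
/// A minimal sketch of the intended call pattern, assuming an already-set-up
/// `channel_manager`/`peer_manager` and an open outbound channel `chan_id`
/// (these names are illustrative, not part of this API):
/// ```ignore
/// // Ask for the channel's feerate to be updated...
/// channel_manager.update_fee(chan_id, 1000).unwrap();
/// // ...then let the PeerManager flush the resulting UpdateHTLCs event to the peer.
/// peer_manager.process_events();
/// ```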
1996 pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
1997 let mut channel_state = self.channel_state.lock().unwrap();
1998 match channel_state.by_id.get_mut(&channel_id) {
1999 None => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
2001 if !chan.is_usable() {
2002 return Err(APIError::APIMisuseError{err: "Channel is not in usable state"});
2004 if !chan.is_outbound() {
2005 return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
2007 if let Some((update_fee, commitment_signed, chan_monitor)) = chan.send_update_fee_and_commit(feerate_per_kw).map_err(|e| APIError::APIMisuseError{err: e.err})? {
2008 if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
2011 let mut pending_events = self.pending_events.lock().unwrap();
2012 pending_events.push(events::Event::UpdateHTLCs {
2013 node_id: chan.get_their_node_id(),
2014 updates: msgs::CommitmentUpdate {
2015 update_add_htlcs: Vec::new(),
2016 update_fulfill_htlcs: Vec::new(),
2017 update_fail_htlcs: Vec::new(),
2018 update_fail_malformed_htlcs: Vec::new(),
2019 update_fee: Some(update_fee),
2030 impl events::EventsProvider for ChannelManager {
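// Returns all pending events and clears the queue; each event is only ever returned once, so the
// caller must handle everything it gets back.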
2031 fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
2032 let mut pending_events = self.pending_events.lock().unwrap();
2033 let mut ret = Vec::new();
2034 mem::swap(&mut ret, &mut *pending_events);
2039 impl ChainListener for ChannelManager {
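// Processes a newly connected block: lets each channel count confirmations (possibly producing a
// FundingLocked to send), watches for our funding outputs being spent on-chain, and force-closes
// any channel that needs it.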
2040 fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
2041 let mut new_events = Vec::new();
2042 let mut failed_channels = Vec::new();
2044 let mut channel_lock = self.channel_state.lock().unwrap();
2045 let channel_state = channel_lock.borrow_parts();
2046 let short_to_id = channel_state.short_to_id;
2047 channel_state.by_id.retain(|_, channel| {
2048 let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
2049 if let Ok(Some(funding_locked)) = chan_res {
2050 let announcement_sigs = self.get_announcement_sigs(channel);
2051 new_events.push(events::Event::SendFundingLocked {
2052 node_id: channel.get_their_node_id(),
2053 msg: funding_locked,
2054 announcement_sigs: announcement_sigs
2056 short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
2057 } else if let Err(e) = chan_res {
2058 new_events.push(events::Event::HandleError {
2059 node_id: channel.get_their_node_id(),
2062 if channel.is_shutdown() {
2066 if let Some(funding_txo) = channel.get_funding_txo() {
2067 for tx in txn_matched {
2068 for inp in tx.input.iter() {
2069 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
2070 if let Some(short_id) = channel.get_short_channel_id() {
2071 short_to_id.remove(&short_id);
2073 // It looks like our counterparty went on-chain. We go ahead and
2074 // broadcast our latest local state as well here, just in case it's
2075 // some kind of SPV attack, though we expect these to be dropped.
2076 failed_channels.push(channel.force_shutdown());
2077 if let Ok(update) = self.get_channel_update(&channel) {
2078 new_events.push(events::Event::BroadcastChannelUpdate {
2087 if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
2088 if let Some(short_id) = channel.get_short_channel_id() {
2089 short_to_id.remove(&short_id);
2091 failed_channels.push(channel.force_shutdown());
2092 // If would_broadcast_at_height() is true, the channel_monitor will broadcast
2093 // the latest local tx for us, so we should skip that here (it doesn't really
2094 // hurt anything, but does make tests a bit simpler).
2095 failed_channels.last_mut().unwrap().0 = Vec::new();
2096 if let Ok(update) = self.get_channel_update(&channel) {
2097 new_events.push(events::Event::BroadcastChannelUpdate {
2106 for failure in failed_channels.drain(..) {
2107 self.finish_force_close_channel(failure);
2109 let mut pending_events = self.pending_events.lock().unwrap();
2110 for funding_locked in new_events.drain(..) {
2111 pending_events.push(funding_locked);
2113 self.latest_block_height.store(height as usize, Ordering::Release);
2116 /// We force-close the channel without letting our counterparty participate in the shutdown
2117 fn block_disconnected(&self, header: &BlockHeader) {
2118 let mut new_events = Vec::new();
2119 let mut failed_channels = Vec::new();
2121 let mut channel_lock = self.channel_state.lock().unwrap();
2122 let channel_state = channel_lock.borrow_parts();
2123 let short_to_id = channel_state.short_to_id;
2124 channel_state.by_id.retain(|_, v| {
2125 if v.block_disconnected(header) {
2126 if let Some(short_id) = v.get_short_channel_id() {
2127 short_to_id.remove(&short_id);
2129 failed_channels.push(v.force_shutdown());
2130 if let Ok(update) = self.get_channel_update(&v) {
2131 new_events.push(events::Event::BroadcastChannelUpdate {
2141 for failure in failed_channels.drain(..) {
2142 self.finish_force_close_channel(failure);
2144 if !new_events.is_empty() {
2145 let mut pending_events = self.pending_events.lock().unwrap();
2146 for funding_locked in new_events.drain(..) {
2147 pending_events.push(funding_locked);
2150 self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
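// Maps a Result<_, MsgHandleErrInternal> from an internal_* handler into the public
// Result<_, HandleError>, first force-closing the offending channel (or disconnecting the peer
// when the error carries an all-zeros channel_id) if the error demands it.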
2154 macro_rules! handle_error {
2155 ($self: ident, $internal: expr, $their_node_id: expr) => {
2158 Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
2159 if needs_channel_force_close {
2161 &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
2162 if msg.channel_id == [0; 32] {
2163 $self.peer_disconnected(&$their_node_id, true);
2165 $self.force_close_channel(&msg.channel_id);
2168 &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
2169 &Some(msgs::ErrorAction::IgnoreError) => {},
2170 &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
2171 if msg.channel_id == [0; 32] {
2172 $self.peer_disconnected(&$their_node_id, true);
2174 $self.force_close_channel(&msg.channel_id);
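// Each ChannelMessageHandler method below is a thin wrapper that forwards to the matching
// internal_* handler and lets handle_error! deal with any failure.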
2186 impl ChannelMessageHandler for ChannelManager {
2187 //TODO: Handle errors and close channel (or so)
2188 fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, HandleError> {
2189 handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
2192 fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
2193 handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
2196 fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, HandleError> {
2197 handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
2200 fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
2201 handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
2204 fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, HandleError> {
2205 handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
2208 fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), HandleError> {
2209 handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
2212 fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, HandleError> {
2213 handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
2216 fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
2217 handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
2220 fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
2221 handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
2224 fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
2225 handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
2228 fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
2229 handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
2232 fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
2233 handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
2236 fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
2237 handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
2240 fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
2241 handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
2244 fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
2245 handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
2248 fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), HandleError> {
2249 handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
2252 fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
2253 let mut new_events = Vec::new();
2254 let mut failed_channels = Vec::new();
2255 let mut failed_payments = Vec::new();
2257 let mut channel_state_lock = self.channel_state.lock().unwrap();
2258 let channel_state = channel_state_lock.borrow_parts();
2259 let short_to_id = channel_state.short_to_id;
2260 if no_connection_possible {
2261 channel_state.by_id.retain(|_, chan| {
2262 if chan.get_their_node_id() == *their_node_id {
2263 if let Some(short_id) = chan.get_short_channel_id() {
2264 short_to_id.remove(&short_id);
2266 failed_channels.push(chan.force_shutdown());
2267 if let Ok(update) = self.get_channel_update(&chan) {
2268 new_events.push(events::Event::BroadcastChannelUpdate {
2278 channel_state.by_id.retain(|_, chan| {
2279 if chan.get_their_node_id() == *their_node_id {
2280 //TODO: mark channel disabled (and maybe announce such after a timeout).
2281 let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused();
2282 if !failed_adds.is_empty() {
2283 let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
2284 failed_payments.push((chan_update, failed_adds));
2286 if chan.is_shutdown() {
2287 if let Some(short_id) = chan.get_short_channel_id() {
2288 short_to_id.remove(&short_id);
2297 for failure in failed_channels.drain(..) {
2298 self.finish_force_close_channel(failure);
2300 if !new_events.is_empty() {
2301 let mut pending_events = self.pending_events.lock().unwrap();
2302 for event in new_events.drain(..) {
2303 pending_events.push(event);
2306 for (chan_update, mut htlc_sources) in failed_payments {
2307 for (htlc_source, payment_hash) in htlc_sources.drain(..) {
2308 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
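// On reconnect, hand back a channel_reestablish for each channel we still track with this peer so
// both sides can resynchronize; see below for the one case where we instead drop the channel.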
2313 fn peer_connected(&self, their_node_id: &PublicKey) -> Vec<msgs::ChannelReestablish> {
2314 let mut res = Vec::new();
2315 let mut channel_state = self.channel_state.lock().unwrap();
2316 channel_state.by_id.retain(|_, chan| {
2317 if chan.get_their_node_id() == *their_node_id {
2318 if !chan.have_received_message() {
2319 // If we created this (outbound) channel while we were disconnected from the
2320 // peer we probably failed to send the open_channel message, which is now
2321 // lost. We can't have had anything pending related to this channel, so we just
2325 res.push(chan.get_channel_reestablish());
2330 //TODO: Also re-broadcast announcement_signatures
2334 fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
2335 if msg.channel_id == [0; 32] {
2336 for chan in self.list_channels() {
2337 if chan.remote_network_id == *their_node_id {
2338 self.force_close_channel(&chan.channel_id);
2342 self.force_close_channel(&msg.channel_id);
2349 use chain::chaininterface;
2350 use chain::transaction::OutPoint;
2351 use chain::chaininterface::ChainListener;
2352 use ln::channelmanager::{ChannelManager,OnionKeys};
2353 use ln::router::{Route, RouteHop, Router};
2355 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
2356 use util::test_utils;
2357 use util::events::{Event, EventsProvider};
2358 use util::errors::APIError;
2359 use util::logger::Logger;
2360 use util::ser::Writeable;
2362 use bitcoin::util::hash::Sha256dHash;
2363 use bitcoin::blockdata::block::{Block, BlockHeader};
2364 use bitcoin::blockdata::transaction::{Transaction, TxOut};
2365 use bitcoin::blockdata::constants::genesis_block;
2366 use bitcoin::network::constants::Network;
2367 use bitcoin::network::serialize::serialize;
2368 use bitcoin::network::serialize::BitcoinHash;
2372 use secp256k1::{Secp256k1, Message};
2373 use secp256k1::key::{PublicKey,SecretKey};
2375 use crypto::sha2::Sha256;
2376 use crypto::digest::Digest;
2378 use rand::{thread_rng,Rng};
2380 use std::cell::RefCell;
2381 use std::collections::{BTreeSet, HashMap};
2382 use std::default::Default;
2384 use std::sync::{Arc, Mutex};
2385 use std::time::Instant;
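// Builds the five-hop BOLT 4 test route and derives its per-hop onion keys; shared by the onion
// construction and failure-packet test vectors below.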
2388 fn build_test_onion_keys() -> Vec<OnionKeys> {
2389 // Keys from BOLT 4, used in both test vector tests
2390 let secp_ctx = Secp256k1::new();
2395 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
2396 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, so we fill in payloads manually
2399 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
2400 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, so we fill in payloads manually
2403 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
2404 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, so we fill in payloads manually
2407 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]).unwrap(),
2408 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, so we fill in payloads manually
2411 pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()[..]).unwrap(),
2412 short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generatable from a RouteHop, so we fill in payloads manually
2417 let session_priv = SecretKey::from_slice(&secp_ctx, &hex::decode("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap();
2419 let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
2420 assert_eq!(onion_keys.len(), route.hops.len());
2425 fn onion_vectors() {
2426 // Packet creation test vectors from BOLT 4
2427 let onion_keys = build_test_onion_keys();
2429 assert_eq!(onion_keys[0].shared_secret[..], hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
2430 assert_eq!(onion_keys[0].blinding_factor[..], hex::decode("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
2431 assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
2432 assert_eq!(onion_keys[0].rho, hex::decode("ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986").unwrap()[..]);
2433 assert_eq!(onion_keys[0].mu, hex::decode("b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba").unwrap()[..]);
2435 assert_eq!(onion_keys[1].shared_secret[..], hex::decode("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
2436 assert_eq!(onion_keys[1].blinding_factor[..], hex::decode("bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f").unwrap()[..]);
2437 assert_eq!(onion_keys[1].ephemeral_pubkey.serialize()[..], hex::decode("028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2").unwrap()[..]);
2438 assert_eq!(onion_keys[1].rho, hex::decode("450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59").unwrap()[..]);
2439 assert_eq!(onion_keys[1].mu, hex::decode("05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9").unwrap()[..]);
2441 assert_eq!(onion_keys[2].shared_secret[..], hex::decode("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
2442 assert_eq!(onion_keys[2].blinding_factor[..], hex::decode("a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5").unwrap()[..]);
2443 assert_eq!(onion_keys[2].ephemeral_pubkey.serialize()[..], hex::decode("03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0").unwrap()[..]);
2444 assert_eq!(onion_keys[2].rho, hex::decode("11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea").unwrap()[..]);
2445 assert_eq!(onion_keys[2].mu, hex::decode("caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78").unwrap()[..]);
2447 assert_eq!(onion_keys[3].shared_secret[..], hex::decode("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
2448 assert_eq!(onion_keys[3].blinding_factor[..], hex::decode("7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262").unwrap()[..]);
2449 assert_eq!(onion_keys[3].ephemeral_pubkey.serialize()[..], hex::decode("031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595").unwrap()[..]);
2450 assert_eq!(onion_keys[3].rho, hex::decode("cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e").unwrap()[..]);
2451 assert_eq!(onion_keys[3].mu, hex::decode("5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9").unwrap()[..]);
2453 assert_eq!(onion_keys[4].shared_secret[..], hex::decode("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
2454 assert_eq!(onion_keys[4].blinding_factor[..], hex::decode("c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205").unwrap()[..]);
2455 assert_eq!(onion_keys[4].ephemeral_pubkey.serialize()[..], hex::decode("03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4").unwrap()[..]);
2456 assert_eq!(onion_keys[4].rho, hex::decode("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
2457 assert_eq!(onion_keys[4].mu, hex::decode("8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a").unwrap()[..]);
2459 // Test vectors below are flat-out wrong: they claim to set outgoing_cltv_value to non-0 :/
2460 let payloads = vec!(
2461 msgs::OnionHopData {
2463 data: msgs::OnionRealm0HopData {
2464 short_channel_id: 0,
2466 outgoing_cltv_value: 0,
2470 msgs::OnionHopData {
2472 data: msgs::OnionRealm0HopData {
2473 short_channel_id: 0x0101010101010101,
2474 amt_to_forward: 0x0100000001,
2475 outgoing_cltv_value: 0,
2479 msgs::OnionHopData {
2481 data: msgs::OnionRealm0HopData {
2482 short_channel_id: 0x0202020202020202,
2483 amt_to_forward: 0x0200000002,
2484 outgoing_cltv_value: 0,
2488 msgs::OnionHopData {
2490 data: msgs::OnionRealm0HopData {
2491 short_channel_id: 0x0303030303030303,
2492 amt_to_forward: 0x0300000003,
2493 outgoing_cltv_value: 0,
2497 msgs::OnionHopData {
2499 data: msgs::OnionRealm0HopData {
2500 short_channel_id: 0x0404040404040404,
2501 amt_to_forward: 0x0400000004,
2502 outgoing_cltv_value: 0,
2508 let packet = ChannelManager::construct_onion_packet(payloads, onion_keys, &[0x42; 32]);
2509 // Just check the final packet encoding, as it includes all the per-hop vectors in it
2511 assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());
2515 fn test_failure_packet_onion() {
2516 // Returning Errors test vectors from BOLT 4
2518 let onion_keys = build_test_onion_keys();
2519 let onion_error = ChannelManager::build_failure_packet(&onion_keys[4].shared_secret, 0x2002, &[0; 0]);
2520 assert_eq!(onion_error.encode(), hex::decode("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap());
2522 let onion_packet_1 = ChannelManager::encrypt_failure_packet(&onion_keys[4].shared_secret, &onion_error.encode()[..]);
2523 assert_eq!(onion_packet_1.data, hex::decode("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap());
2525 let onion_packet_2 = ChannelManager::encrypt_failure_packet(&onion_keys[3].shared_secret, &onion_packet_1.data[..]);
2526 assert_eq!(onion_packet_2.data, hex::decode("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap());
2528 let onion_packet_3 = ChannelManager::encrypt_failure_packet(&onion_keys[2].shared_secret, &onion_packet_2.data[..]);
2529 assert_eq!(onion_packet_3.data, hex::decode("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap());
2531 let onion_packet_4 = ChannelManager::encrypt_failure_packet(&onion_keys[1].shared_secret, &onion_packet_3.data[..]);
2532 assert_eq!(onion_packet_4.data, hex::decode("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap());
2534 let onion_packet_5 = ChannelManager::encrypt_failure_packet(&onion_keys[0].shared_secret, &onion_packet_4.data[..]);
2535 assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
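// Test helper: delivers `tx` to the chain watcher in one block and then connects further empty
// blocks on top so the channel sees enough confirmations.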
2538 fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
2539 assert!(chain.does_match_tx(tx));
2540 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
2541 chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
2543 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
2544 chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
2549 chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
2550 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
2551 chan_monitor: Arc<test_utils::TestChannelMonitor>,
2552 node: Arc<ChannelManager>,
2554 network_payment_count: Rc<RefCell<u8>>,
2555 network_chan_count: Rc<RefCell<u32>>,
2557 impl Drop for Node {
2558 fn drop(&mut self) {
2559 if !::std::thread::panicking() {
2560 // Check that we processed all pending events
2561 assert_eq!(self.node.get_and_clear_pending_events().len(), 0);
2562 assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0);
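// Walks two test nodes through a complete channel open: open_channel/accept_channel, funding
// creation and signing, on-chain confirmation, the funding_locked exchange, and finally the
// announcement_signatures handshake. Returns the resulting announcement, both channel_updates,
// the channel_id and the funding transaction.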
2567 fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
2568 node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 10001, 42).unwrap();
2570 let events_1 = node_a.node.get_and_clear_pending_events();
2571 assert_eq!(events_1.len(), 1);
2572 let accept_chan = match events_1[0] {
2573 Event::SendOpenChannel { ref node_id, ref msg } => {
2574 assert_eq!(*node_id, node_b.node.get_our_node_id());
2575 node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), msg).unwrap()
2577 _ => panic!("Unexpected event"),
2580 node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_chan).unwrap();
2582 let chan_id = *node_a.network_chan_count.borrow();
2586 let events_2 = node_a.node.get_and_clear_pending_events();
2587 assert_eq!(events_2.len(), 1);
2589 Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
2590 assert_eq!(*channel_value_satoshis, 100000);
2591 assert_eq!(user_channel_id, 42);
2593 tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
2594 value: *channel_value_satoshis, script_pubkey: output_script.clone(),
2596 funding_output = OutPoint::new(Sha256dHash::from_data(&serialize(&tx).unwrap()[..]), 0);
2598 node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
2599 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
2600 assert_eq!(added_monitors.len(), 1);
2601 assert_eq!(added_monitors[0].0, funding_output);
2602 added_monitors.clear();
2604 _ => panic!("Unexpected event"),
2607 let events_3 = node_a.node.get_and_clear_pending_events();
2608 assert_eq!(events_3.len(), 1);
2609 let funding_signed = match events_3[0] {
2610 Event::SendFundingCreated { ref node_id, ref msg } => {
2611 assert_eq!(*node_id, node_b.node.get_our_node_id());
2612 let res = node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), msg).unwrap();
2613 let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
2614 assert_eq!(added_monitors.len(), 1);
2615 assert_eq!(added_monitors[0].0, funding_output);
2616 added_monitors.clear();
2619 _ => panic!("Unexpected event"),
2622 node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &funding_signed).unwrap();
2624 let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
2625 assert_eq!(added_monitors.len(), 1);
2626 assert_eq!(added_monitors[0].0, funding_output);
2627 added_monitors.clear();
2630 let events_4 = node_a.node.get_and_clear_pending_events();
2631 assert_eq!(events_4.len(), 1);
2633 Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
2634 assert_eq!(user_channel_id, 42);
2635 assert_eq!(*funding_txo, funding_output);
2637 _ => panic!("Unexpected event"),
2640 confirm_transaction(&node_a.chain_monitor, &tx, chan_id);
2641 let events_5 = node_a.node.get_and_clear_pending_events();
2642 assert_eq!(events_5.len(), 1);
2644 Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
2645 assert_eq!(*node_id, node_b.node.get_our_node_id());
2646 assert!(announcement_sigs.is_none());
2647 node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap()
2649 _ => panic!("Unexpected event"),
2654 confirm_transaction(&node_b.chain_monitor, &tx, chan_id);
2655 let events_6 = node_b.node.get_and_clear_pending_events();
2656 assert_eq!(events_6.len(), 1);
2657 let as_announcement_sigs = match events_6[0] {
2658 Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
2659 assert_eq!(*node_id, node_a.node.get_our_node_id());
2660 channel_id = msg.channel_id.clone();
2661 let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap();
2662 node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
2663 as_announcement_sigs
2665 _ => panic!("Unexpected event"),
2668 let events_7 = node_a.node.get_and_clear_pending_events();
2669 assert_eq!(events_7.len(), 1);
2670 let (announcement, as_update) = match events_7[0] {
2671 Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
2674 _ => panic!("Unexpected event"),
2677 node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap();
2678 let events_8 = node_b.node.get_and_clear_pending_events();
2679 assert_eq!(events_8.len(), 1);
2680 let bs_update = match events_8[0] {
2681 Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
2682 assert!(*announcement == *msg);
2685 _ => panic!("Unexpected event"),
2688 *node_a.network_chan_count.borrow_mut() += 1;
2690 ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id, tx)
2693 fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
2694 let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]);
2696 assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
2697 node.router.handle_channel_update(&chan_announcement.1).unwrap();
2698 node.router.handle_channel_update(&chan_announcement.2).unwrap();
2700 (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
2703 macro_rules! check_spends {
2704 ($tx: expr, $spends_tx: expr) => {
2706 let mut funding_tx_map = HashMap::new();
2707 let spends_tx = $spends_tx;
2708 funding_tx_map.insert(spends_tx.txid(), spends_tx);
2709 $tx.verify(&funding_tx_map).unwrap();
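// Test helper: drives a full cooperative close of the given channel. The closing side sends
// shutdown, both nodes exchange closing_signed until each has broadcast a closing transaction,
// and the two broadcast transactions are checked to be identical and to spend the funding
// transaction. close_inbound_first only controls which side finishes the closing_signed
// exchange first. Returns the BroadcastChannelUpdate messages both nodes generate.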
2714 fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
2715 let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) };
2716 let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
2719 node_a.close_channel(channel_id).unwrap();
2720 let events_1 = node_a.get_and_clear_pending_events();
2721 assert_eq!(events_1.len(), 1);
2722 let shutdown_a = match events_1[0] {
2723 Event::SendShutdown { ref node_id, ref msg } => {
2724 assert_eq!(node_id, &node_b.get_our_node_id());
2727 _ => panic!("Unexpected event"),
2730 let (shutdown_b, mut closing_signed_b) = node_b.handle_shutdown(&node_a.get_our_node_id(), &shutdown_a).unwrap();
2731 if !close_inbound_first {
2732 assert!(closing_signed_b.is_none());
2734 let (empty_a, mut closing_signed_a) = node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b.unwrap()).unwrap();
2735 assert!(empty_a.is_none());
2736 if close_inbound_first {
2737 assert!(closing_signed_a.is_none());
2738 closing_signed_a = node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
2739 assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
2740 tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
2742 let empty_b = node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
2743 assert!(empty_b.is_none());
2744 assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
2745 tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
2747 closing_signed_b = node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
2748 assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
2749 tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
2751 let empty_a2 = node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
2752 assert!(empty_a2.is_none());
2753 assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
2754 tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
2756 assert_eq!(tx_a, tx_b);
2757 check_spends!(tx_a, funding_tx);
2759 let events_2 = node_a.get_and_clear_pending_events();
2760 assert_eq!(events_2.len(), 1);
2761 let as_update = match events_2[0] {
2762 Event::BroadcastChannelUpdate { ref msg } => {
2765 _ => panic!("Unexpected event"),
2768 let events_3 = node_b.get_and_clear_pending_events();
2769 assert_eq!(events_3.len(), 1);
2770 let bs_update = match events_3[0] {
2771 Event::BroadcastChannelUpdate { ref msg } => {
2774 _ => panic!("Unexpected event"),
2777 (as_update, bs_update)
2782 msgs: Vec<msgs::UpdateAddHTLC>,
2783 commitment_msg: msgs::CommitmentSigned,
2786 fn from_event(event: Event) -> SendEvent {
2788 Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
2789 assert!(update_fulfill_htlcs.is_empty());
2790 assert!(update_fail_htlcs.is_empty());
2791 assert!(update_fail_malformed_htlcs.is_empty());
2792 assert!(update_fee.is_none());
2793 SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed }
2795 _ => panic!("Unexpected event type!"),
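// Test helper: asserts that exactly $count channel monitor updates were handed to the node's
// TestChannelMonitor since the last check, then clears the recorded list so later checks start
// fresh. E.g. check_added_monitors!(origin_node, 1);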
2800 macro_rules! check_added_monitors {
2801 ($node: expr, $count: expr) => {
2803 let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
2804 assert_eq!(added_monitors.len(), $count);
2805 added_monitors.clear();
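// Test helper: drives one full commitment_signed/revoke_and_ack exchange between $node_a and
// $node_b, starting from a commitment_signed that $node_b already generated. $node_a handles it
// and produces a revoke_and_ack plus its own commitment_signed, $node_b handles both and replies
// with a final revoke_and_ack, which is delivered back to $node_a. Monitor updates are checked
// at each step; when $fail_backwards is set, the final revoke_and_ack handling at $node_a is
// expected to touch two distinct channel monitors (the failure is relayed across another channel).
// E.g. commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);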
2810 macro_rules! commitment_signed_dance {
2811 ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
2813 check_added_monitors!($node_a, 0);
2814 let (as_revoke_and_ack, as_commitment_signed) = $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
2815 check_added_monitors!($node_a, 1);
2816 check_added_monitors!($node_b, 0);
2817 assert!($node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
2818 check_added_monitors!($node_b, 1);
2819 let (bs_revoke_and_ack, bs_none) = $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
2820 assert!(bs_none.is_none());
2821 check_added_monitors!($node_b, 1);
2822 if $fail_backwards {
2823 assert!($node_a.node.get_and_clear_pending_events().is_empty());
2825 assert!($node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
2827 let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
2828 if $fail_backwards {
2829 assert_eq!(added_monitors.len(), 2);
2830 assert!(added_monitors[0].0 != added_monitors[1].0);
2832 assert_eq!(added_monitors.len(), 1);
2834 added_monitors.clear();
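// Test helper: generates a deterministic (payment_preimage, payment_hash) pair. The preimage is
// the current value of the shared network_payment_count repeated across all 32 bytes (the
// counter is then incremented), and the hash is simply SHA256(preimage).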
2840 macro_rules! get_payment_preimage_hash {
2843 let payment_preimage = [*$node.network_payment_count.borrow(); 32];
2844 *$node.network_payment_count.borrow_mut() += 1;
2845 let mut payment_hash = [0; 32];
2846 let mut sha = Sha256::new();
2847 sha.input(&payment_preimage[..]);
2848 sha.result(&mut payment_hash);
2849 (payment_preimage, payment_hash)
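// Test helper: sends a payment for recv_value from origin_node along the given route, manually
// relaying the update_add_htlc/commitment_signed pair hop by hop and letting each intermediate
// node process its PendingHTLCsForwardable event, until the final node reports PaymentReceived
// for the expected hash and amount. Returns the (preimage, hash) pair of the in-flight payment.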
2854 fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
2855 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
2857 let mut payment_event = {
2858 origin_node.node.send_payment(route, our_payment_hash).unwrap();
2859 check_added_monitors!(origin_node, 1);
2861 let mut events = origin_node.node.get_and_clear_pending_events();
2862 assert_eq!(events.len(), 1);
2863 SendEvent::from_event(events.remove(0))
2865 let mut prev_node = origin_node;
2867 for (idx, &node) in expected_route.iter().enumerate() {
2868 assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
2870 node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
2871 check_added_monitors!(node, 0);
2872 commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
2874 let events_1 = node.node.get_and_clear_pending_events();
2875 assert_eq!(events_1.len(), 1);
2877 Event::PendingHTLCsForwardable { .. } => { },
2878 _ => panic!("Unexpected event"),
2881 node.node.channel_state.lock().unwrap().next_forward = Instant::now();
2882 node.node.process_pending_htlc_forwards();
2884 let mut events_2 = node.node.get_and_clear_pending_events();
2885 assert_eq!(events_2.len(), 1);
2886 if idx == expected_route.len() - 1 {
2888 Event::PaymentReceived { ref payment_hash, amt } => {
2889 assert_eq!(our_payment_hash, *payment_hash);
2890 assert_eq!(amt, recv_value);
2892 _ => panic!("Unexpected event"),
2895 check_added_monitors!(node, 1);
2896 payment_event = SendEvent::from_event(events_2.remove(0));
2897 assert_eq!(payment_event.msgs.len(), 1);
2903 (our_payment_preimage, our_payment_hash)
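// Test helper: claims a previously-routed payment at the final hop and walks the resulting
// update_fulfill_htlc/commitment_signed pairs backwards along expected_route toward the origin.
// If skip_last is set the fulfill is not delivered to the origin node; otherwise the origin is
// expected to surface a PaymentSent event carrying the original preimage.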
2906 fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: [u8; 32]) {
2907 assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
2908 check_added_monitors!(expected_route.last().unwrap(), 1);
2910 let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
2911 macro_rules! update_fulfill_dance {
2912 ($node: expr, $prev_node: expr, $last_node: expr) => {
2914 $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
2916 check_added_monitors!($node, 0);
2918 check_added_monitors!($node, 1);
2920 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
2925 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
2926 let mut prev_node = expected_route.last().unwrap();
2927 for (idx, node) in expected_route.iter().rev().enumerate() {
2928 assert_eq!(expected_next_node, node.node.get_our_node_id());
2929 if next_msgs.is_some() {
2930 update_fulfill_dance!(node, prev_node, false);
2933 let events = node.node.get_and_clear_pending_events();
2934 if !skip_last || idx != expected_route.len() - 1 {
2935 assert_eq!(events.len(), 1);
2937 Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
2938 assert!(update_add_htlcs.is_empty());
2939 assert_eq!(update_fulfill_htlcs.len(), 1);
2940 assert!(update_fail_htlcs.is_empty());
2941 assert!(update_fail_malformed_htlcs.is_empty());
2942 assert!(update_fee.is_none());
2943 expected_next_node = node_id.clone();
2944 next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()));
2946 _ => panic!("Unexpected event"),
2949 assert!(events.is_empty());
2951 if !skip_last && idx == expected_route.len() - 1 {
2952 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
2959 update_fulfill_dance!(origin_node, expected_route.first().unwrap(), true);
2960 let events = origin_node.node.get_and_clear_pending_events();
2961 assert_eq!(events.len(), 1);
2963 Event::PaymentSent { payment_preimage } => {
2964 assert_eq!(payment_preimage, our_payment_preimage);
2966 _ => panic!("Unexpected event"),
2971 fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: [u8; 32]) {
2972 claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage);
2975 const TEST_FINAL_CLTV: u32 = 32;
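// Test helper: asks the origin node's Router for a route to the last node in expected_route,
// sanity-checks that the returned hops match the expected nodes, and then sends the payment via
// send_along_route. Returns the (preimage, hash) pair for the in-flight payment.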
2977 fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> ([u8; 32], [u8; 32]) {
2978 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
2979 assert_eq!(route.hops.len(), expected_route.len());
2980 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
2981 assert_eq!(hop.pubkey, node.node.get_our_node_id());
2984 send_along_route(origin_node, route, expected_route, recv_value)
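// Test helper: like route_payment, but for an amount the channel cannot carry. The send_payment
// call is expected to fail with a RouteError complaining that the value would put us over our
// max HTLC value in flight.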
2987 fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
2988 let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
2989 assert_eq!(route.hops.len(), expected_route.len());
2990 for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
2991 assert_eq!(hop.pubkey, node.node.get_our_node_id());
2994 let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
2996 let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
2998 APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
2999 _ => panic!("Unknown error variants"),
3003 fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
3004 let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
3005 claim_payment(&origin, expected_route, our_payment_preimage);
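// Test helper: fails a previously-routed payment at the final hop via fail_htlc_backwards and
// relays the resulting update_fail_htlc/commitment_signed pairs backwards along expected_route.
// Unless skip_last is set, the origin node should end up with a PaymentFailed event for the
// original payment hash.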
3008 fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) {
3009 assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
3010 check_added_monitors!(expected_route.last().unwrap(), 1);
3012 let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
3013 macro_rules! update_fail_dance {
3014 ($node: expr, $prev_node: expr, $last_node: expr) => {
3016 $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
3017 commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
3022 let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
3023 let mut prev_node = expected_route.last().unwrap();
3024 for (idx, node) in expected_route.iter().rev().enumerate() {
3025 assert_eq!(expected_next_node, node.node.get_our_node_id());
3026 if next_msgs.is_some() {
3027 // We may be the "last node" for the purpose of the commitment dance if we're
3028 // skipping the last node (implying it is disconnected) and we're the
3029 // second-to-last node!
3030 update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
3033 let events = node.node.get_and_clear_pending_events();
3034 if !skip_last || idx != expected_route.len() - 1 {
3035 assert_eq!(events.len(), 1);
3037 Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
3038 assert!(update_add_htlcs.is_empty());
3039 assert!(update_fulfill_htlcs.is_empty());
3040 assert_eq!(update_fail_htlcs.len(), 1);
3041 assert!(update_fail_malformed_htlcs.is_empty());
3042 assert!(update_fee.is_none());
3043 expected_next_node = node_id.clone();
3044 next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
3046 _ => panic!("Unexpected event"),
3049 assert!(events.is_empty());
3051 if !skip_last && idx == expected_route.len() - 1 {
3052 assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
3059 update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
3061 let events = origin_node.node.get_and_clear_pending_events();
3062 assert_eq!(events.len(), 1);
3064 Event::PaymentFailed { payment_hash } => {
3065 assert_eq!(payment_hash, our_payment_hash);
3067 _ => panic!("Unexpected event"),
3072 fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: [u8; 32]) {
3073 fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
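// Test helper: builds node_count standalone test nodes, each with its own fee estimator, chain
// watcher, transaction broadcaster, channel monitor, ChannelManager (on Testnet, with a fresh
// random node key) and Router, all sharing one logger plus common channel/payment counters so
// tests can coordinate ids across nodes.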
3076 fn create_network(node_count: usize) -> Vec<Node> {
3077 let mut nodes = Vec::new();
3078 let mut rng = thread_rng();
3079 let secp_ctx = Secp256k1::new();
3080 let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
3082 let chan_count = Rc::new(RefCell::new(0));
3083 let payment_count = Rc::new(RefCell::new(0));
3085 for _ in 0..node_count {
3086 let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
3087 let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
3088 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
3089 let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
3091 let mut key_slice = [0; 32];
3092 rng.fill_bytes(&mut key_slice);
3093 SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
3095 let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap();
3096 let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id), chain_monitor.clone(), Arc::clone(&logger));
3097 nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router,
3098 network_payment_count: payment_count.clone(),
3099 network_chan_count: chan_count.clone(),
3107 fn test_async_inbound_update_fee() {
3108 let mut nodes = create_network(2);
3109 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3110 let channel_id = chan.2;
3112 macro_rules! get_feerate {
3114 let chan_lock = $node.node.channel_state.lock().unwrap();
3115 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3121 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
3125 // send (1) commitment_signed -.
3126 // <- update_add_htlc/commitment_signed
3127 // send (2) RAA (awaiting remote revoke) -.
3128 // (1) commitment_signed is delivered ->
3129 // .- send (3) RAA (awaiting remote revoke)
3130 // (2) RAA is delivered ->
3131 // .- send (4) commitment_signed
3132 // <- (3) RAA is delivered
3133 // send (5) commitment_signed -.
3134 // <- (4) commitment_signed is delivered
3136 // (5) commitment_signed is delivered ->
3138 // (6) RAA is delivered ->
3140 // First nodes[0] generates an update_fee
3141 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
3142 check_added_monitors!(nodes[0], 1);
3144 let events_0 = nodes[0].node.get_and_clear_pending_events();
3145 assert_eq!(events_0.len(), 1);
3146 let (update_msg, commitment_signed) = match events_0[0] { // (1)
3147 Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
3148 (update_fee.as_ref(), commitment_signed)
3150 _ => panic!("Unexpected event"),
3153 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3155 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
3156 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
3157 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
3158 check_added_monitors!(nodes[1], 1);
3160 let payment_event = {
3161 let mut events_1 = nodes[1].node.get_and_clear_pending_events();
3162 assert_eq!(events_1.len(), 1);
3163 SendEvent::from_event(events_1.remove(0))
3165 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
3166 assert_eq!(payment_event.msgs.len(), 1);
3168 // ...now when the messages get delivered everyone should be happy
3169 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3170 let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
3171 assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack
3172 check_added_monitors!(nodes[0], 1);
3174 // deliver (1), generate (3):
3175 let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3176 assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting nodes[0] revoke_and_ack
3177 check_added_monitors!(nodes[1], 1);
3179 let bs_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2)
3180 assert!(bs_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (4)
3181 assert!(bs_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (4)
3182 assert!(bs_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (4)
3183 assert!(bs_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (4)
3184 assert!(bs_update.as_ref().unwrap().update_fee.is_none()); // (4)
3185 check_added_monitors!(nodes[1], 1);
3187 let as_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap(); // deliver (3)
3188 assert!(as_update.as_ref().unwrap().update_add_htlcs.is_empty()); // (5)
3189 assert!(as_update.as_ref().unwrap().update_fulfill_htlcs.is_empty()); // (5)
3190 assert!(as_update.as_ref().unwrap().update_fail_htlcs.is_empty()); // (5)
3191 assert!(as_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); // (5)
3192 assert!(as_update.as_ref().unwrap().update_fee.is_none()); // (5)
3193 check_added_monitors!(nodes[0], 1);
3195 let (as_second_revoke, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.unwrap().commitment_signed).unwrap(); // deliver (4)
3196 assert!(as_second_commitment_signed.is_none()); // only (6)
3197 check_added_monitors!(nodes[0], 1);
3199 let (bs_second_revoke, bs_second_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.unwrap().commitment_signed).unwrap(); // deliver (5)
3200 assert!(bs_second_commitment_signed.is_none());
3201 check_added_monitors!(nodes[1], 1);
3203 assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none());
3204 check_added_monitors!(nodes[0], 1);
3206 let events_2 = nodes[0].node.get_and_clear_pending_events();
3207 assert_eq!(events_2.len(), 1);
3209 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
3210 _ => panic!("Unexpected event"),
3213 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none()); // deliver (6)
3214 check_added_monitors!(nodes[1], 1);
3218 fn test_update_fee_unordered_raa() {
3219 // Just the intro to the previous test followed by an out-of-order RAA (which caused a
3220 // crash in an earlier version of the update_fee patch)
3221 let mut nodes = create_network(2);
3222 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3223 let channel_id = chan.2;
3225 macro_rules! get_feerate {
3227 let chan_lock = $node.node.channel_state.lock().unwrap();
3228 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3234 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
3236 // First nodes[0] generates an update_fee
3237 nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0]) + 20).unwrap();
3238 check_added_monitors!(nodes[0], 1);
3240 let events_0 = nodes[0].node.get_and_clear_pending_events();
3241 assert_eq!(events_0.len(), 1);
3242 let update_msg = match events_0[0] { // (1)
3243 Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
3246 _ => panic!("Unexpected event"),
3249 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3251 // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
3252 let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
3253 nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
3254 check_added_monitors!(nodes[1], 1);
3256 let payment_event = {
3257 let mut events_1 = nodes[1].node.get_and_clear_pending_events();
3258 assert_eq!(events_1.len(), 1);
3259 SendEvent::from_event(events_1.remove(0))
3261 assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
3262 assert_eq!(payment_event.msgs.len(), 1);
3264 // ...now when the messages get delivered everyone should be happy
3265 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
3266 let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
3267 assert!(as_commitment_signed.is_none()); // nodes[0] is awaiting nodes[1] revoke_and_ack
3268 check_added_monitors!(nodes[0], 1);
3270 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none()); // deliver (2)
3271 check_added_monitors!(nodes[1], 1);
3273 // We can't continue, sadly, because our (1) now has a bogus signature
3277 fn test_multi_flight_update_fee() {
3278 let nodes = create_network(2);
3279 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3280 let channel_id = chan.2;
3282 macro_rules! get_feerate {
3284 let chan_lock = $node.node.channel_state.lock().unwrap();
3285 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3291 // update_fee/commitment_signed ->
3292 // .- send (1) RAA and (2) commitment_signed
3293 // update_fee (never committed) ->
3294 // (3) update_fee ->
3295 // We have to manually generate the above update_fee; it is allowed by the protocol, but we
3296 // don't track which updates correspond to which revoke_and_ack responses, so we're in
3297 // AwaitingRAA mode and will not generate the update_fee ourselves yet.
3298 // <- (1) RAA delivered
3299 // (3) is generated and (4) CS is sent -.
3300 // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
3301 // know the per_commitment_point to use for it.
3302 // <- (2) commitment_signed delivered
3303 // revoke_and_ack ->
3304 // B should send no response here
3305 // (4) commitment_signed delivered ->
3306 // <- RAA/commitment_signed delivered
3307 // revoke_and_ack ->
3309 // First nodes[0] generates an update_fee
3310 let initial_feerate = get_feerate!(nodes[0]);
3311 nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
3312 check_added_monitors!(nodes[0], 1);
3314 let events_0 = nodes[0].node.get_and_clear_pending_events();
3315 assert_eq!(events_0.len(), 1);
3316 let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
3317 Event::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
3318 (update_fee.as_ref().unwrap(), commitment_signed)
3320 _ => panic!("Unexpected event"),
3323 // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
3324 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
3325 let (bs_revoke_msg, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
3326 check_added_monitors!(nodes[1], 1);
3328 // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
3330 nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
3331 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
3333 // Create the (3) update_fee message that nodes[0] will generate before it does...
3334 let mut update_msg_2 = msgs::UpdateFee {
3335 channel_id: update_msg_1.channel_id.clone(),
3336 feerate_per_kw: (initial_feerate + 30) as u32,
3339 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
3341 update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
3343 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
3345 // Deliver (1), generating (3) and (4)
3346 let as_second_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
3347 check_added_monitors!(nodes[0], 1);
3348 assert!(as_second_update.as_ref().unwrap().update_add_htlcs.is_empty());
3349 assert!(as_second_update.as_ref().unwrap().update_fulfill_htlcs.is_empty());
3350 assert!(as_second_update.as_ref().unwrap().update_fail_htlcs.is_empty());
3351 assert!(as_second_update.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
3352 // Check that the update_fee newly generated matches what we delivered:
3353 assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
3354 assert_eq!(as_second_update.as_ref().unwrap().update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
3356 // Deliver (2) commitment_signed
3357 let (as_revoke_msg, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), bs_commitment_signed.as_ref().unwrap()).unwrap();
3358 check_added_monitors!(nodes[0], 1);
3359 assert!(as_commitment_signed.is_none());
3361 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap().is_none());
3362 check_added_monitors!(nodes[1], 1);
3365 let (bs_second_revoke, bs_second_commitment) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.unwrap().commitment_signed).unwrap();
3366 check_added_monitors!(nodes[1], 1);
3368 assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap().is_none());
3369 check_added_monitors!(nodes[0], 1);
3371 let (as_second_revoke, as_second_commitment) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment.unwrap()).unwrap();
3372 assert!(as_second_commitment.is_none());
3373 check_added_monitors!(nodes[0], 1);
3375 assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap().is_none());
3376 check_added_monitors!(nodes[1], 1);
3380 fn test_update_fee_vanilla() {
3381 let nodes = create_network(2);
3382 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3383 let channel_id = chan.2;
3385 macro_rules! get_feerate {
3387 let chan_lock = $node.node.channel_state.lock().unwrap();
3388 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3393 let feerate = get_feerate!(nodes[0]);
3394 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
3396 let events_0 = nodes[0].node.get_and_clear_pending_events();
3397 assert_eq!(events_0.len(), 1);
3398 let (update_msg, commitment_signed) = match events_0[0] {
3399 Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3400 (update_fee.as_ref(), commitment_signed)
3402 _ => panic!("Unexpected event"),
3404 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3406 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3407 let commitment_signed = commitment_signed.unwrap();
3408 check_added_monitors!(nodes[0], 1);
3409 check_added_monitors!(nodes[1], 1);
3411 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3412 assert!(resp_option.is_none());
3413 check_added_monitors!(nodes[0], 1);
3415 let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
3416 assert!(commitment_signed.is_none());
3417 check_added_monitors!(nodes[0], 1);
3419 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
3420 assert!(resp_option.is_none());
3421 check_added_monitors!(nodes[1], 1);
3425 fn test_update_fee_with_fundee_update_add_htlc() {
3426 let mut nodes = create_network(2);
3427 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3428 let channel_id = chan.2;
3430 macro_rules! get_feerate {
3432 let chan_lock = $node.node.channel_state.lock().unwrap();
3433 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3439 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
3441 let feerate = get_feerate!(nodes[0]);
3442 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
3444 let events_0 = nodes[0].node.get_and_clear_pending_events();
3445 assert_eq!(events_0.len(), 1);
3446 let (update_msg, commitment_signed) = match events_0[0] {
3447 Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3448 (update_fee.as_ref(), commitment_signed)
3450 _ => panic!("Unexpected event"),
3452 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3453 check_added_monitors!(nodes[0], 1);
3454 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3455 let commitment_signed = commitment_signed.unwrap();
3456 check_added_monitors!(nodes[1], 1);
3458 let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap();
3460 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]);
3462 // nothing happens since nodes[1] is in AwaitingRemoteRevoke
3463 nodes[1].node.send_payment(route, our_payment_hash).unwrap();
3465 let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
3466 assert_eq!(added_monitors.len(), 0);
3467 added_monitors.clear();
3469 let events = nodes[0].node.get_and_clear_pending_events();
3470 assert_eq!(events.len(), 0);
3471 // nodes[1] has nothing to do
3473 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3474 assert!(resp_option.is_none());
3475 check_added_monitors!(nodes[0], 1);
3477 let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
3478 assert!(commitment_signed.is_none());
3479 check_added_monitors!(nodes[0], 1);
3480 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
3481 // AwaitingRemoteRevoke ends here
3483 let commitment_update = resp_option.unwrap();
3484 assert_eq!(commitment_update.update_add_htlcs.len(), 1);
3485 assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
3486 assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
3487 assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
3488 assert!(commitment_update.update_fee.is_none());
3490 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap();
3491 let (revoke, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
3492 check_added_monitors!(nodes[0], 1);
3493 check_added_monitors!(nodes[1], 1);
3494 let commitment_signed = commitment_signed.unwrap();
3495 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap();
3496 check_added_monitors!(nodes[1], 1);
3497 assert!(resp_option.is_none());
3499 let (revoke, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap();
3500 check_added_monitors!(nodes[1], 1);
3501 assert!(commitment_signed.is_none());
3502 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap();
3503 check_added_monitors!(nodes[0], 1);
3504 assert!(resp_option.is_none());
3506 let events = nodes[0].node.get_and_clear_pending_events();
3507 assert_eq!(events.len(), 1);
3509 Event::PendingHTLCsForwardable { .. } => { },
3510 _ => panic!("Unexpected event"),
3512 nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
3513 nodes[0].node.process_pending_htlc_forwards();
3515 let events = nodes[0].node.get_and_clear_pending_events();
3516 assert_eq!(events.len(), 1);
3518 Event::PaymentReceived { .. } => { },
3519 _ => panic!("Unexpected event"),
3522 claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
3524 send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
3525 send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
3526 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
3530 fn test_update_fee() {
3531 let nodes = create_network(2);
3532 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
3533 let channel_id = chan.2;
3535 macro_rules! get_feerate {
3537 let chan_lock = $node.node.channel_state.lock().unwrap();
3538 let chan = chan_lock.by_id.get(&channel_id).unwrap();
3544 // (1) update_fee/commitment_signed ->
3545 // <- (2) revoke_and_ack
3546 // .- send (3) commitment_signed
3547 // (4) update_fee/commitment_signed ->
3548 // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
3549 // <- (3) commitment_signed delivered
3550 // send (6) revoke_and_ack -.
3551 // <- (5) deliver revoke_and_ack
3552 // (6) deliver revoke_and_ack ->
3553 // .- send (7) commitment_signed in response to (4)
3554 // <- (7) deliver commitment_signed
3555 // revoke_and_ack ->
3557 // Create and deliver (1)...
3558 let feerate = get_feerate!(nodes[0]);
3559 nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
3561 let events_0 = nodes[0].node.get_and_clear_pending_events();
3562 assert_eq!(events_0.len(), 1);
3563 let (update_msg, commitment_signed) = match events_0[0] {
3564 Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3565 (update_fee.as_ref(), commitment_signed)
3567 _ => panic!("Unexpected event"),
3569 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3571 // Generate (2) and (3):
3572 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3573 let commitment_signed_0 = commitment_signed.unwrap();
3574 check_added_monitors!(nodes[0], 1);
3575 check_added_monitors!(nodes[1], 1);
3578 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3579 assert!(resp_option.is_none());
3580 check_added_monitors!(nodes[0], 1);
3582 // Create and deliver (4)...
3583 nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
3584 let events_0 = nodes[0].node.get_and_clear_pending_events();
3585 assert_eq!(events_0.len(), 1);
3586 let (update_msg, commitment_signed) = match events_0[0] {
3587 Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
3588 (update_fee.as_ref(), commitment_signed)
3590 _ => panic!("Unexpected event"),
3592 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
3594 let (revoke_msg, commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
3596 assert!(commitment_signed.is_none());
3597 check_added_monitors!(nodes[0], 1);
3598 check_added_monitors!(nodes[1], 1);
3600 // Handle (3), creating (6):
3601 let (revoke_msg_0, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
3602 assert!(commitment_signed.is_none());
3603 check_added_monitors!(nodes[0], 1);
3606 let resp_option = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
3607 assert!(resp_option.is_none());
3608 check_added_monitors!(nodes[0], 1);
3610 // Deliver (6), creating (7):
3611 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
3612 let commitment_signed = resp_option.unwrap().commitment_signed;
3613 check_added_monitors!(nodes[1], 1);
3616 let (revoke_msg, commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
3617 assert!(commitment_signed.is_none());
3618 check_added_monitors!(nodes[0], 1);
3619 let resp_option = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
3620 assert!(resp_option.is_none());
3621 check_added_monitors!(nodes[1], 1);
3623 assert_eq!(get_feerate!(nodes[0]), feerate + 30);
3624 assert_eq!(get_feerate!(nodes[1]), feerate + 30);
3625 close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
3629 fn fake_network_test() {
3630 // Simple test which builds a network of ChannelManagers, connects them to each other, and
3631 // tests that payments get routed and transactions broadcast in semi-reasonable ways.
3632 let nodes = create_network(4);
3634 // Create some initial channels
3635 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3636 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3637 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
3639 // Rebalance the network a bit by relaying one payment through all the channels...
3640 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3641 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3642 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3643 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
3645 // Send some more payments
3646 send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
3647 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
3648 send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
3650 // Test failure packets
3651 let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
3652 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
3654 // Add a new channel from nodes[1] to nodes[3], skipping nodes[2]
3655 let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
3657 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
3658 send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
3659 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3660 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3661 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3662 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3663 send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
3665 // Do some rebalance loop payments, simultaneously
3666 let mut hops = Vec::with_capacity(3);
3667 hops.push(RouteHop {
3668 pubkey: nodes[2].node.get_our_node_id(),
3669 short_channel_id: chan_2.0.contents.short_channel_id,
3671 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
3673 hops.push(RouteHop {
3674 pubkey: nodes[3].node.get_our_node_id(),
3675 short_channel_id: chan_3.0.contents.short_channel_id,
3677 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
3679 hops.push(RouteHop {
3680 pubkey: nodes[1].node.get_our_node_id(),
3681 short_channel_id: chan_4.0.contents.short_channel_id,
3683 cltv_expiry_delta: TEST_FINAL_CLTV,
3685 hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
3686 hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
3687 let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
3689 let mut hops = Vec::with_capacity(3);
3690 hops.push(RouteHop {
3691 pubkey: nodes[3].node.get_our_node_id(),
3692 short_channel_id: chan_4.0.contents.short_channel_id,
3694 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
3696 hops.push(RouteHop {
3697 pubkey: nodes[2].node.get_our_node_id(),
3698 short_channel_id: chan_3.0.contents.short_channel_id,
3700 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
3702 hops.push(RouteHop {
3703 pubkey: nodes[1].node.get_our_node_id(),
3704 short_channel_id: chan_2.0.contents.short_channel_id,
3706 cltv_expiry_delta: TEST_FINAL_CLTV,
3708 hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
3709 hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
3710 let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
3712 // Claim the rebalances...
3713 fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
3714 claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
3716 // Add a duplicate new channel from nodes[1] to nodes[3]
3717 let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
3719 // Send some payments across both channels
3720 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
3721 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
3722 let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
3724 route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
3726 //TODO: Test that routes work again here as we've been notified that the channel is full
3728 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
3729 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
3730 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
3732 // Close down the channels...
3733 close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
3734 close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
3735 close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
3736 close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
3737 close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
3741 fn duplicate_htlc_test() {
3742 // Test that we accept duplicate payment_hash HTLCs across the network and that
3743 // claiming/failing them are all separate and don't affect each other
3744 let mut nodes = create_network(6);
3746 // Create some initial channels to route via 3 to 4/5 from 0/1/2
3747 create_announced_chan_between_nodes(&nodes, 0, 3);
3748 create_announced_chan_between_nodes(&nodes, 1, 3);
3749 create_announced_chan_between_nodes(&nodes, 2, 3);
3750 create_announced_chan_between_nodes(&nodes, 3, 4);
3751 create_announced_chan_between_nodes(&nodes, 3, 5);
3753 let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
3755 *nodes[0].network_payment_count.borrow_mut() -= 1;
3756 assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
3758 *nodes[0].network_payment_count.borrow_mut() -= 1;
3759 assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
3761 claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
3762 fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
3763 claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
3766 #[derive(PartialEq)]
3767 enum HTLCType { NONE, TIMEOUT, SUCCESS }
3768 /// Tests that the given node has broadcast transactions for the given Channel
3770 /// First checks that the latest local commitment tx has been broadcast, unless an explicit
3771 /// commitment_tx is provided, which may be used to test that a remote commitment tx was
3772 /// broadcast and the revoked outputs were claimed.
3774 /// Next tests that there is (or is not) a transaction that spends the commitment transaction
3775 /// that appears to be the type of HTLC transaction specified in has_htlc_tx.
3777 /// All broadcast transactions must be accounted for in one of the above three types of transactions, or we'll fail the test.
3779 fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
3780 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
3781 assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
3783 let mut res = Vec::with_capacity(2);
3784 node_txn.retain(|tx| {
3785 if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
3786 check_spends!(tx, chan.3.clone());
3787 if commitment_tx.is_none() {
3788 res.push(tx.clone());
3793 if let Some(explicit_tx) = commitment_tx {
3794 res.push(explicit_tx.clone());
3797 assert_eq!(res.len(), 1);
3799 if has_htlc_tx != HTLCType::NONE {
3800 node_txn.retain(|tx| {
3801 if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
3802 check_spends!(tx, res[0].clone());
3803 if has_htlc_tx == HTLCType::TIMEOUT {
3804 assert!(tx.lock_time != 0);
3806 assert!(tx.lock_time == 0);
3808 res.push(tx.clone());
3812 assert_eq!(res.len(), 2);
3815 assert!(node_txn.is_empty());
3819 /// Tests that the given node has broadcast a claim transaction against the provided revoked
3820 /// HTLC transaction.
3821 fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
3822 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
3823 assert_eq!(node_txn.len(), 1);
3824 node_txn.retain(|tx| {
3825 if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
3826 check_spends!(tx, revoked_tx.clone());
3830 assert!(node_txn.is_empty());
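// Test helper: checks that the node broadcast a preimage-claim transaction. The first broadcast
// transaction must spend one of prev_txn (which must itself be a single-input commitment tx),
// and its witness is checked to be long enough that it must be spending an HTLC output. Drains
// and returns everything the node broadcast.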
3833 fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
3834 let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
3836 assert!(node_txn.len() >= 1);
3837 assert_eq!(node_txn[0].input.len(), 1);
3838 let mut found_prev = false;
3840 for tx in prev_txn {
3841 if node_txn[0].input[0].previous_output.txid == tx.txid() {
3842 check_spends!(node_txn[0], tx.clone());
3843 assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
3844 assert_eq!(tx.input.len(), 1); // must spend a commitment tx
3850 assert!(found_prev);
3852 let mut res = Vec::new();
3853 mem::swap(&mut *node_txn, &mut res);
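// Test helper: collects the BroadcastChannelUpdate event each of nodes[a] and nodes[b] generates
// after a channel between them is closed on-chain, and applies both updates to every node's
// Router so the whole test network sees the resulting channel updates.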
3857 fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
3858 let events_1 = nodes[a].node.get_and_clear_pending_events();
3859 assert_eq!(events_1.len(), 1);
3860 let as_update = match events_1[0] {
3861 Event::BroadcastChannelUpdate { ref msg } => {
3864 _ => panic!("Unexpected event"),
3867 let events_2 = nodes[b].node.get_and_clear_pending_events();
3868 assert_eq!(events_2.len(), 1);
3869 let bs_update = match events_2[0] {
3870 Event::BroadcastChannelUpdate { ref msg } => {
3873 _ => panic!("Unexpected event"),
3877 node.router.handle_channel_update(&as_update).unwrap();
3878 node.router.handle_channel_update(&bs_update).unwrap();
3883 fn channel_monitor_network_test() {
3884 // Simple test which builds a network of ChannelManagers, connects them to each other, and
3885 // tests that ChannelMonitor is able to recover from various states.
3886 let nodes = create_network(5);
3888 // Create some initial channels
3889 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
3890 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
3891 let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
3892 let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
3894 // Rebalance the network a bit by relaying one payment through all the channels...
3895 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
3896 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
3897 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
3898 send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
3900 // Simple case with no pending HTLCs:
3901 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
3903 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
3904 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3905 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
3906 test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
3908 get_announce_close_broadcast_events(&nodes, 0, 1);
3909 assert_eq!(nodes[0].node.list_channels().len(), 0);
3910 assert_eq!(nodes[1].node.list_channels().len(), 1);
3912 // One pending HTLC is discarded by the force-close:
3913 let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
3915 // Simple case of one pending HTLC to HTLC-Timeout
3916 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
3918 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
3919 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3920 nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
3921 test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
3923 get_announce_close_broadcast_events(&nodes, 1, 2);
3924 assert_eq!(nodes[1].node.list_channels().len(), 0);
3925 assert_eq!(nodes[2].node.list_channels().len(), 1);
3927 macro_rules! claim_funds {
3928 ($node: expr, $prev_node: expr, $preimage: expr) => {
3930 assert!($node.node.claim_funds($preimage));
3931 check_added_monitors!($node, 1);
3933 let events = $node.node.get_and_clear_pending_events();
3934 assert_eq!(events.len(), 1);
3936 Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
3937 assert!(update_add_htlcs.is_empty());
3938 assert!(update_fail_htlcs.is_empty());
3939 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
3941 _ => panic!("Unexpected event"),
3947 // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
3948 // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
3949 nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
3951 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
3953 // Claim the payment on nodes[3], giving it knowledge of the preimage
3954 claim_funds!(nodes[3], nodes[2], payment_preimage_1);
3956 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3957 nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
3959 check_preimage_claim(&nodes[3], &node_txn);
3961 get_announce_close_broadcast_events(&nodes, 2, 3);
3962 assert_eq!(nodes[2].node.list_channels().len(), 0);
3963 assert_eq!(nodes[3].node.list_channels().len(), 1);
3965 // One pending HTLC to time out:
3966 let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
3969 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3970 nodes[3].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
3971 for i in 2..TEST_FINAL_CLTV - 3 {
3972 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3973 nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
3976 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
3978 // Claim the payment on nodes[4], giving it knowledge of the preimage
3979 claim_funds!(nodes[4], nodes[3], payment_preimage_2);
3981 header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3982 nodes[4].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
3983 for i in 2..TEST_FINAL_CLTV - 3 {
3984 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3985 nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
3988 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
3990 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3991 nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
3993 check_preimage_claim(&nodes[4], &node_txn);
3995 get_announce_close_broadcast_events(&nodes, 3, 4);
3996 assert_eq!(nodes[3].node.list_channels().len(), 0);
3997 assert_eq!(nodes[4].node.list_channels().len(), 0);
3999 // Create some new channels:
4000 let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
4002 // A pending HTLC which will be revoked:
4003 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4004 // Get the will-be-revoked local txn from nodes[0]
4005 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
4006 assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
4007 assert_eq!(revoked_local_txn[0].input.len(), 1);
4008 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
4009 assert_eq!(revoked_local_txn[0].output.len(), 2); // Only the HTLC output and the output back to nodes[0] are present
4010 assert_eq!(revoked_local_txn[1].input.len(), 1);
4011 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
4012 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
4013 // Revoke the old state
4014 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
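// Claiming the payment advances the channel state; as part of that exchange nodes[0] sends a
// revoke_and_ack which releases the per-commitment secret for its old commitment, so the
// revoked_local_txn grabbed above are now punishable by nodes[1] if ever broadcast.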
4017 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4018 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4020 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4021 assert_eq!(node_txn.len(), 3);
4022 assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
4023 assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
4025 check_spends!(node_txn[0], revoked_local_txn[0].clone());
4026 node_txn.swap_remove(0);
4028 test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
4030 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4031 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
4032 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4033 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
4034 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
4036 get_announce_close_broadcast_events(&nodes, 0, 1);
4037 assert_eq!(nodes[0].node.list_channels().len(), 0);
4038 assert_eq!(nodes[1].node.list_channels().len(), 0);
4042 fn revoked_output_claim() {
4043 // Simple test to ensure a node will claim a revoked output when a stale remote commitment
4044 // transaction is broadcast by its counterparty
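// The claim takes the form of a justice (penalty) transaction: since nodes[0] revealed the
// per-commitment secret when it revoked this state, nodes[1] can derive the revocation key and
// sweep the to_local output without waiting out any relative timelock.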
4045 let nodes = create_network(2);
4046 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4047 // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
4048 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
4049 assert_eq!(revoked_local_txn.len(), 1);
4050 // Only output is the full channel value back to nodes[0]:
4051 assert_eq!(revoked_local_txn[0].output.len(), 1);
4052 // Send a payment through, updating everyone's latest commitment txn
4053 send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
4055 // Inform nodes[1] that nodes[0] broadcast a stale tx
4056 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4057 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4058 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4059 assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast the justice tx twice, and its own local commitment tx once
4061 assert_eq!(node_txn[0], node_txn[2]);
4063 check_spends!(node_txn[0], revoked_local_txn[0].clone());
4064 check_spends!(node_txn[1], chan_1.3.clone());
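// node_txn[1], spending the funding output, is nodes[1]'s own commitment transaction, which it
// broadcasts as it force-closes the channel after seeing the revoked commitment hit the chain.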
4066 // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
4067 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4068 get_announce_close_broadcast_events(&nodes, 0, 1);
4072 fn claim_htlc_outputs_shared_tx() {
4073 // A node has revoked an old state; the HTLCs haven't timed out yet, so they are claimed via a shared justice tx
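// "Shared" here means the monitor should sweep the revoked to_local output and both revoked
// HTLC outputs of the stale commitment with one aggregated justice transaction, since none of
// the HTLCs have reached their timeout on chain yet.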
4074 let nodes = create_network(2);
4076 // Create some new channel:
4077 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4080 // Rebalance the network to generate HTLCs in both directions
4080 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4081 // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
4082 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4083 let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
4085 // Get the will-be-revoked local txn from nodes[0]
4086 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
4087 assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
4088 assert_eq!(revoked_local_txn[0].input.len(), 1);
4089 assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
4090 assert_eq!(revoked_local_txn[1].input.len(), 1);
4091 assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
4092 assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
4093 check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
4095 //Revoke the old state
4096 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
4099 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4101 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4103 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
4104 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4105 assert_eq!(node_txn.len(), 4);
4107 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
4108 check_spends!(node_txn[0], revoked_local_txn[0].clone());
4110 assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
4112 let mut witness_lens = BTreeSet::new();
4113 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
4114 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
4115 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
4116 assert_eq!(witness_lens.len(), 3);
4117 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
4118 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
4119 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
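// The witness-script lengths tell us which output each justice input is sweeping: the to_local
// script (revocation path), an offered-HTLC script, and a received-HTLC script. The exact byte
// counts (77/133/138) are simply what those BOLT 3 scripts serialize to for this channel.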
4121 // Next nodes[1] broadcasts its current local tx state:
4122 assert_eq!(node_txn[1].input.len(), 1);
4123 assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique txout; this tx was broadcast by the ChannelManager
4125 assert_eq!(node_txn[2].input.len(), 1);
4126 let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
4127 assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
4128 assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
4129 assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
4130 assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
4132 get_announce_close_broadcast_events(&nodes, 0, 1);
4133 assert_eq!(nodes[0].node.list_channels().len(), 0);
4134 assert_eq!(nodes[1].node.list_channels().len(), 0);
4138 fn claim_htlc_outputs_single_tx() {
4139 // A node has revoked an old state; the HTLCs have timed out, so each is claimed in a separate justice tx
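// Unlike the shared-tx case above, the block here is connected at height 200, past the HTLCs'
// CLTV expiries, so the monitor is expected to claim each revoked output with its own
// transaction rather than aggregating them into one justice tx.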
4140 let nodes = create_network(2);
4142 let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
4144 // Rebalance the network to generate HTLCs in both directions
4145 send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
4146 // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
4147 // time as two different claim transactions, since we're going to time out the HTLCs given a high current block height
4148 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4149 let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
4151 // Get the will-be-revoked local txn from nodes[0]
4152 let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
4154 //Revoke the old state
4155 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
4158 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4160 nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
4162 nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
4163 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4164 assert_eq!(node_txn.len(), 12); // ChannelManager: 2, ChannelMonitor: 10 ((1 revoked to_local claim + 2 revoked HTLC claims + 1 local commitment tx + 1 HTLC-timeout tx) * 2 due to block re-scan)
4166 assert_eq!(node_txn[0], node_txn[7]);
4167 assert_eq!(node_txn[1], node_txn[8]);
4168 assert_eq!(node_txn[2], node_txn[9]);
4169 assert_eq!(node_txn[3], node_txn[10]);
4170 assert_eq!(node_txn[4], node_txn[11]);
4171 assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + HTLC-timeout tx broadcast by the ChannelManager
4172 assert_eq!(node_txn[4], node_txn[6]);
4174 assert_eq!(node_txn[0].input.len(), 1);
4175 assert_eq!(node_txn[1].input.len(), 1);
4176 assert_eq!(node_txn[2].input.len(), 1);
4178 let mut revoked_tx_map = HashMap::new();
4179 revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
4180 node_txn[0].verify(&revoked_tx_map).unwrap();
4181 node_txn[1].verify(&revoked_tx_map).unwrap();
4182 node_txn[2].verify(&revoked_tx_map).unwrap();
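// verify() script-checks each claim against the revoked commitment's outputs (supplied via the
// txid -> tx map), so a claim with a bad witness or wrong key would fail here outright.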
4184 let mut witness_lens = BTreeSet::new();
4185 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
4186 witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
4187 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
4188 assert_eq!(witness_lens.len(), 3);
4189 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
4190 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
4191 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
4193 assert_eq!(node_txn[3].input.len(), 1);
4194 check_spends!(node_txn[3], chan_1.3.clone());
4196 assert_eq!(node_txn[4].input.len(), 1);
4197 let witness_script = node_txn[4].input[0].witness.last().unwrap();
4198 assert_eq!(witness_script.len(), 133); //Spending an offered htlc output
4199 assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
4200 assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
4201 assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
4203 get_announce_close_broadcast_events(&nodes, 0, 1);
4204 assert_eq!(nodes[0].node.list_channels().len(), 0);
4205 assert_eq!(nodes[1].node.list_channels().len(), 0);
4209 fn test_htlc_ignore_latest_remote_commitment() {
4210 // Test that HTLC transactions spending the latest remote commitment transaction are simply
4211 // ignored if we cannot claim them. This originally tickled an invalid unwrap().
4212 let nodes = create_network(2);
4213 create_announced_chan_between_nodes(&nodes, 0, 1);
4215 route_payment(&nodes[0], &[&nodes[1]], 10000000);
4216 nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
4218 let events = nodes[0].node.get_and_clear_pending_events();
4219 assert_eq!(events.len(), 1);
4221 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4222 assert_eq!(flags & 0b10, 0b10);
4224 _ => panic!("Unexpected event"),
4228 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
4229 assert_eq!(node_txn.len(), 2);
4231 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4232 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
4235 let events = nodes[1].node.get_and_clear_pending_events();
4236 assert_eq!(events.len(), 1);
4238 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4239 assert_eq!(flags & 0b10, 0b10);
4241 _ => panic!("Unexpected event"),
4245 // Duplicate the block_connected call since this may happen due to other listeners
4246 // registering new transactions
4247 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
4251 fn test_force_close_fail_back() {
4252 // Check which HTLCs are failed-backwards on channel force-closure
4253 let mut nodes = create_network(3);
4254 create_announced_chan_between_nodes(&nodes, 0, 1);
4255 create_announced_chan_between_nodes(&nodes, 1, 2);
4257 let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
4259 let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
4261 let mut payment_event = {
4262 nodes[0].node.send_payment(route, our_payment_hash).unwrap();
4263 check_added_monitors!(nodes[0], 1);
4265 let mut events = nodes[0].node.get_and_clear_pending_events();
4266 assert_eq!(events.len(), 1);
4267 SendEvent::from_event(events.remove(0))
4270 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4271 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4273 let events_1 = nodes[1].node.get_and_clear_pending_events();
4274 assert_eq!(events_1.len(), 1);
4276 Event::PendingHTLCsForwardable { .. } => { },
4277 _ => panic!("Unexpected event"),
4280 nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
4281 nodes[1].node.process_pending_htlc_forwards();
4283 let mut events_2 = nodes[1].node.get_and_clear_pending_events();
4284 assert_eq!(events_2.len(), 1);
4285 payment_event = SendEvent::from_event(events_2.remove(0));
4286 assert_eq!(payment_event.msgs.len(), 1);
4288 check_added_monitors!(nodes[1], 1);
4289 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
4290 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
4291 check_added_monitors!(nodes[2], 1);
4293 // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
4294 // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
4295 // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
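// From nodes[1]'s perspective the HTLC it forwarded is still live: nodes[2] could yet claim it
// on chain with the preimage, so nodes[1] must not fail it back to nodes[0] just because the
// channel was force-closed.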
4297 nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
4298 let events_3 = nodes[2].node.get_and_clear_pending_events();
4299 assert_eq!(events_3.len(), 1);
4301 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4302 assert_eq!(flags & 0b10, 0b10);
4304 _ => panic!("Unexpected event"),
4308 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
4309 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
4310 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will
4311 // simply go back to nodes[1] upon timeout.
4312 assert_eq!(node_txn.len(), 1);
4316 let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4317 nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
4319 let events_4 = nodes[1].node.get_and_clear_pending_events();
4320 // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
4321 assert_eq!(events_4.len(), 1);
4323 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4324 assert_eq!(flags & 0b10, 0b10);
4326 _ => panic!("Unexpected event"),
4329 // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success...
4331 let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
4332 monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
4333 .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
4335 nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
4336 let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
4337 assert_eq!(node_txn.len(), 1);
4338 assert_eq!(node_txn[0].input.len(), 1);
4339 assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
4340 assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
4341 assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
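// Per BOLT 3, a second-stage HTLC transaction uses a 5-element witness (a leading empty element
// for CHECKMULTISIG, the two signatures, a preimage-or-empty slot, and the witness script), and
// HTLC-Success sets lock_time to 0 while HTLC-Timeout commits to the HTLC's cltv_expiry;
// together the two checks above pin this down as an HTLC-Success.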
4343 check_spends!(node_txn[0], tx);
4347 fn test_unconf_chan() {
4348 // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0]'s side
4349 let nodes = create_network(2);
4350 create_announced_chan_between_nodes(&nodes, 0, 1);
4352 let channel_state = nodes[0].node.channel_state.lock().unwrap();
4353 assert_eq!(channel_state.by_id.len(), 1);
4354 assert_eq!(channel_state.short_to_id.len(), 1);
4355 mem::drop(channel_state);
4357 let mut headers = Vec::new();
4358 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4359 headers.push(header.clone());
4361 header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4362 headers.push(header.clone());
4364 while !headers.is_empty() {
4365 nodes[0].node.block_disconnected(&headers.pop().unwrap());
4368 let events = nodes[0].node.get_and_clear_pending_events();
4369 assert_eq!(events.len(), 1);
4371 Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
4372 assert_eq!(flags & 0b10, 0b10);
4374 _ => panic!("Unexpected event"),
4377 let channel_state = nodes[0].node.channel_state.lock().unwrap();
4378 assert_eq!(channel_state.by_id.len(), 0);
4379 assert_eq!(channel_state.short_to_id.len(), 0);
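// Reconnects node_a and node_b after a simulated disconnect: each side replays
// channel_reestablish, and we then deliver whatever funding_locked (if pre_all_htlcs) and
// retransmitted commitment updates the peers still owe each other. The tuples count how many
// update_fulfill_htlcs/update_fail_htlcs node_a expects to receive from node_b (element 0) and
// node_b from node_a (element 1).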
4382 fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize)) {
4383 let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id());
4384 let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id());
4386 let mut resp_1 = Vec::new();
4387 for msg in reestablish_1 {
4388 resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
4390 if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
4391 check_added_monitors!(node_b, 1);
4393 check_added_monitors!(node_b, 0);
4396 let mut resp_2 = Vec::new();
4397 for msg in reestablish_2 {
4398 resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
4400 if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
4401 check_added_monitors!(node_a, 1);
4403 check_added_monitors!(node_a, 0);
4406 // We don't yet support both sides needing updates, as that would require a different commitment dance:
4407 assert!((pending_htlc_claims.0 == 0 && pending_htlc_fails.0 == 0) || (pending_htlc_claims.1 == 0 && pending_htlc_fails.1 == 0));
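// (Retransmitting pending updates from both sides at once would require interleaving the
// commitment_signed/revoke_and_ack exchanges, which this helper doesn't attempt to model.)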
4409 for chan_msgs in resp_1.drain(..) {
4411 let _announcement_sigs_opt = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
4412 //TODO: Test announcement_sigs re-sending when we've implemented it
4414 assert!(chan_msgs.0.is_none());
4416 assert!(chan_msgs.1.is_none());
4417 if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
4418 let commitment_update = chan_msgs.2.unwrap();
4419 assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected
4420 assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0);
4421 assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0);
4422 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
4423 for update_fulfill in commitment_update.update_fulfill_htlcs {
4424 node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
4426 for update_fail in commitment_update.update_fail_htlcs {
4427 node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
4430 commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
4432 assert!(chan_msgs.2.is_none());
4436 for chan_msgs in resp_2.drain(..) {
4438 let _announcement_sigs_opt = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
4439 //TODO: Test announcement_sigs re-sending when we've implemented it
4441 assert!(chan_msgs.0.is_none());
4443 assert!(chan_msgs.1.is_none());
4444 if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
4445 let commitment_update = chan_msgs.2.unwrap();
4446 assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected
4447 assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1);
4448 assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.1);
4449 assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
4450 for update_fulfill in commitment_update.update_fulfill_htlcs {
4451 node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
4453 for update_fail in commitment_update.update_fail_htlcs {
4454 node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
4457 commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
4459 assert!(chan_msgs.2.is_none());
4465 fn test_simple_peer_disconnect() {
4466 // Test that we can reconnect when there are no lost messages
4467 let nodes = create_network(3);
4468 create_announced_chan_between_nodes(&nodes, 0, 1);
4469 create_announced_chan_between_nodes(&nodes, 1, 2);
4471 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4472 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4473 reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0));
4475 let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
4476 let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
4477 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
4478 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
4480 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4481 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4482 reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0));
4484 let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
4485 let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
4486 let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
4487 let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
4489 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4490 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4492 claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
4493 fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
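// With the skip-last flag set above, the final update_fulfill/update_fail toward nodes[0] is
// withheld while the peers are disconnected; the reconnect below should retransmit exactly one
// of each, which is what the (1, 0) claim/fail counts assert.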
4495 reconnect_nodes(&nodes[0], &nodes[1], false, (1, 0), (1, 0));
4497 let events = nodes[0].node.get_and_clear_pending_events();
4498 assert_eq!(events.len(), 2);
4500 Event::PaymentSent { payment_preimage } => {
4501 assert_eq!(payment_preimage, payment_preimage_3);
4503 _ => panic!("Unexpected event"),
4506 Event::PaymentFailed { payment_hash } => {
4507 assert_eq!(payment_hash, payment_hash_5);
4509 _ => panic!("Unexpected event"),
4513 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
4514 fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
4518 fn test_invalid_channel_announcement() {
4519 // Test the BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
4520 let secp_ctx = Secp256k1::new();
4521 let nodes = create_network(2);
4523 let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
4525 let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
4526 let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
4527 let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
4528 let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
4530 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
4532 let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
4533 let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
4535 let as_network_key = nodes[0].node.get_our_node_id();
4536 let bs_network_key = nodes[1].node.get_our_node_id();
4538 let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
4540 let mut chan_announcement;
4542 macro_rules! dummy_unsigned_msg {
4544 msgs::UnsignedChannelAnnouncement {
4545 features: msgs::GlobalFeatures::new(),
4546 chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
4547 short_channel_id: as_chan.get_short_channel_id().unwrap(),
4548 node_id_1: if were_node_one { as_network_key } else { bs_network_key },
4549 node_id_2: if were_node_one { bs_network_key } else { as_network_key },
4550 bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
4551 bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
4552 excess_data: Vec::new(),
4557 macro_rules! sign_msg {
4558 ($unsigned_msg: expr) => {
4559 let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
4560 let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
4561 let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
4562 let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key);
4563 let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key);
4564 chan_announcement = msgs::ChannelAnnouncement {
4565 node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
4566 node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
4567 bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
4568 bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
4569 contents: $unsigned_msg
4574 let unsigned_msg = dummy_unsigned_msg!();
4575 sign_msg!(unsigned_msg);
4576 assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
4577 let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
4579 // The nodes were configured with Network::Testnet, so an announcement for the Bitcoin mainnet chain must be rejected
4580 let mut unsigned_msg = dummy_unsigned_msg!();
4581 unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
4582 sign_msg!(unsigned_msg);
4583 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
4585 let mut unsigned_msg = dummy_unsigned_msg!();
4586 unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
4587 sign_msg!(unsigned_msg);
4588 assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
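// Both rejections follow from the BOLT 7 requirement that a node ignore a channel_announcement
// whose chain_hash names a chain it does not recognize; the router here only knows testnet, so
// neither the mainnet genesis hash nor an arbitrary hash may be accepted.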